FreeBSD/Linux Kernel Cross Reference
sys/dev/pci/pci.c
1 /*-
2 * Copyright (c) 1997, Stefan Esser <se@freebsd.org>
3 * Copyright (c) 2000, Michael Smith <msmith@freebsd.org>
4 * Copyright (c) 2000, BSDi
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice unmodified, this list of conditions, and the following
12 * disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD: releng/6.4/sys/dev/pci/pci.c 182030 2008-08-23 03:43:38Z mlaier $");
31
32 #include "opt_bus.h"
33
34 #include <sys/param.h>
35 #include <sys/systm.h>
36 #include <sys/malloc.h>
37 #include <sys/module.h>
38 #include <sys/linker.h>
39 #include <sys/fcntl.h>
40 #include <sys/conf.h>
41 #include <sys/kernel.h>
42 #include <sys/queue.h>
43 #include <sys/sysctl.h>
44 #include <sys/types.h>
45
46 #include <vm/vm.h>
47 #include <vm/pmap.h>
48 #include <vm/vm_extern.h>
49
50 #include <sys/bus.h>
51 #include <machine/bus.h>
52 #include <sys/rman.h>
53 #include <machine/resource.h>
54
55 #if defined(__i386__) || defined(__amd64__)
56 #include <machine/intr_machdep.h>
57 #endif
58
59 #include <sys/pciio.h>
60 #include <dev/pci/pcireg.h>
61 #include <dev/pci/pcivar.h>
62 #include <dev/pci/pci_private.h>
63
64 #include "pcib_if.h"
65 #include "pci_if.h"
66
67 #if (defined(__i386__) && !defined(PC98)) || defined(__amd64__) || \
68 defined (__ia64__)
69 #include <contrib/dev/acpica/acpi.h>
70 #include "acpi_if.h"
71 #else
72 #define ACPI_PWR_FOR_SLEEP(x, y, z)
73 #endif
74
75 static uint32_t pci_mapbase(unsigned mapreg);
76 static int pci_maptype(unsigned mapreg);
77 static int pci_mapsize(unsigned testval);
78 static int pci_maprange(unsigned mapreg);
79 static void pci_fixancient(pcicfgregs *cfg);
80
81 static int pci_porten(device_t pcib, int b, int s, int f);
82 static int pci_memen(device_t pcib, int b, int s, int f);
83 static void pci_assign_interrupt(device_t bus, device_t dev,
84 int force_route);
85 static int pci_add_map(device_t pcib, device_t bus, device_t dev,
86 int b, int s, int f, int reg,
87 struct resource_list *rl, int force, int prefetch);
88 static int pci_probe(device_t dev);
89 static int pci_attach(device_t dev);
90 static void pci_load_vendor_data(void);
91 static int pci_describe_parse_line(char **ptr, int *vendor,
92 int *device, char **desc);
93 static char *pci_describe_device(device_t dev);
94 static int pci_modevent(module_t mod, int what, void *arg);
95 static void pci_hdrtypedata(device_t pcib, int b, int s, int f,
96 pcicfgregs *cfg);
97 static void pci_read_extcap(device_t pcib, pcicfgregs *cfg);
98 static void pci_disable_msi(device_t dev);
99 static void pci_enable_msi(device_t dev, uint64_t address,
100 uint16_t data);
101 static void pci_enable_msix(device_t dev, u_int index,
102 uint64_t address, uint32_t data);
103 static void pci_mask_msix(device_t dev, u_int index);
104 static void pci_unmask_msix(device_t dev, u_int index);
105 static int pci_msi_blacklisted(void);
106 static void pci_resume_msi(device_t dev);
107 static void pci_resume_msix(device_t dev);
108
/*
 * Device, bus and PCI interface methods exported by the pci bus driver.
 * Generic bus methods are used where no PCI-specific handling is needed.
 */
static device_method_t pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		pci_probe),
	DEVMETHOD(device_attach,	pci_attach),
	DEVMETHOD(device_detach,	bus_generic_detach),
	DEVMETHOD(device_shutdown,	bus_generic_shutdown),
	DEVMETHOD(device_suspend,	pci_suspend),
	DEVMETHOD(device_resume,	pci_resume),

	/* Bus interface */
	DEVMETHOD(bus_print_child,	pci_print_child),
	DEVMETHOD(bus_probe_nomatch,	pci_probe_nomatch),
	DEVMETHOD(bus_read_ivar,	pci_read_ivar),
	DEVMETHOD(bus_write_ivar,	pci_write_ivar),
	DEVMETHOD(bus_driver_added,	pci_driver_added),
	DEVMETHOD(bus_setup_intr,	pci_setup_intr),
	DEVMETHOD(bus_teardown_intr,	pci_teardown_intr),

	DEVMETHOD(bus_get_resource_list,pci_get_resource_list),
	DEVMETHOD(bus_set_resource,	bus_generic_rl_set_resource),
	DEVMETHOD(bus_get_resource,	bus_generic_rl_get_resource),
	DEVMETHOD(bus_delete_resource,	pci_delete_resource),
	DEVMETHOD(bus_alloc_resource,	pci_alloc_resource),
	DEVMETHOD(bus_release_resource,	bus_generic_rl_release_resource),
	DEVMETHOD(bus_activate_resource, bus_generic_activate_resource),
	DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource),
	DEVMETHOD(bus_child_pnpinfo_str, pci_child_pnpinfo_str_method),
	DEVMETHOD(bus_child_location_str, pci_child_location_str_method),

	/* PCI interface */
	DEVMETHOD(pci_read_config,	pci_read_config_method),
	DEVMETHOD(pci_write_config,	pci_write_config_method),
	DEVMETHOD(pci_enable_busmaster,	pci_enable_busmaster_method),
	DEVMETHOD(pci_disable_busmaster, pci_disable_busmaster_method),
	DEVMETHOD(pci_enable_io,	pci_enable_io_method),
	DEVMETHOD(pci_disable_io,	pci_disable_io_method),
	DEVMETHOD(pci_get_powerstate,	pci_get_powerstate_method),
	DEVMETHOD(pci_set_powerstate,	pci_set_powerstate_method),
	DEVMETHOD(pci_assign_interrupt,	pci_assign_interrupt_method),
	DEVMETHOD(pci_find_extcap,	pci_find_extcap_method),
	DEVMETHOD(pci_alloc_msi,	pci_alloc_msi_method),
	DEVMETHOD(pci_alloc_msix,	pci_alloc_msix_method),
	DEVMETHOD(pci_release_msi,	pci_release_msi_method),
	DEVMETHOD(pci_msi_count,	pci_msi_count_method),
	DEVMETHOD(pci_msix_count,	pci_msix_count_method),

	{ 0, 0 }	/* terminator required by the method-table scanner */
};
157
/* Register the "pci" driver class and attach it under pcib (PCI bridges). */
DEFINE_CLASS_0(pci, pci_driver, pci_methods, 0);

static devclass_t pci_devclass;
DRIVER_MODULE(pci, pcib, pci_driver, pci_devclass, pci_modevent, 0);
MODULE_VERSION(pci, 1);

/* Vendor/device description database; filled in by pci_load_vendor_data(). */
static char	*pci_vendordata;
static size_t	pci_vendordata_size;
166
167
/*
 * Table-driven quirk handling: each entry matches a device by its
 * combined device/vendor ID word and names a workaround to apply.
 */
struct pci_quirk {
	uint32_t devid;	/* Vendor/device of the card */
	int	type;
#define	PCI_QUIRK_MAP_REG	1 /* PCI map register in weird place */
#define	PCI_QUIRK_DISABLE_MSI	2 /* MSI/MSI-X doesn't work */
	int	arg1;		/* quirk-specific argument (e.g. map reg offset) */
	int	arg2;		/* quirk-specific argument; unused entries are 0 */
};
176
struct pci_quirk pci_quirks[] = {
	/* The Intel 82371AB and 82443MX has a map register at offset 0x90. */
	{ 0x71138086, PCI_QUIRK_MAP_REG,	0x90, 0 },
	{ 0x719b8086, PCI_QUIRK_MAP_REG,	0x90, 0 },
	/* As does the Serverworks OSB4 (the SMBus mapping register) */
	{ 0x02001166, PCI_QUIRK_MAP_REG,	0x90, 0 },

	/*
	 * MSI doesn't work with the ServerWorks CNB20-HE Host Bridge
	 * or the CMIC-SL (AKA ServerWorks GC_LE).
	 */
	{ 0x00141166, PCI_QUIRK_DISABLE_MSI,	0, 0 },
	{ 0x00171166, PCI_QUIRK_DISABLE_MSI,	0, 0 },

	/*
	 * MSI doesn't work on earlier Intel chipsets including
	 * E7500, E7501, E7505, 845, 865, 875/E7210, and 855.
	 */
	{ 0x25408086, PCI_QUIRK_DISABLE_MSI,	0, 0 },
	{ 0x254c8086, PCI_QUIRK_DISABLE_MSI,	0, 0 },
	{ 0x25508086, PCI_QUIRK_DISABLE_MSI,	0, 0 },
	{ 0x25608086, PCI_QUIRK_DISABLE_MSI,	0, 0 },
	{ 0x25708086, PCI_QUIRK_DISABLE_MSI,	0, 0 },
	{ 0x25788086, PCI_QUIRK_DISABLE_MSI,	0, 0 },
	{ 0x35808086, PCI_QUIRK_DISABLE_MSI,	0, 0 },

	/*
	 * MSI doesn't work with devices behind the AMD 8131 HT-PCIX
	 * bridge.
	 */
	{ 0x74501022, PCI_QUIRK_DISABLE_MSI,	0, 0 },

	{ 0 }	/* zero devid terminates the table */
};
211
/* map register information */
#define	PCI_MAPMEM	0x01	/* memory map */
#define	PCI_MAPMEMP	0x02	/* prefetchable memory map */
#define	PCI_MAPPORT	0x04	/* port map */

/* Global list of all enumerated PCI functions (built by pci_read_device()). */
struct devlist pci_devq;
uint32_t pci_generation;	/* bumped on every list change */
uint32_t pci_numdevs = 0;
/* Set by pci_read_extcap() when a PCIe / PCI-X capability is discovered. */
static int pcie_chipset, pcix_chipset;

/* sysctl vars */
SYSCTL_NODE(_hw, OID_AUTO, pci, CTLFLAG_RD, 0, "PCI bus tuning parameters");

static int pci_enable_io_modes = 1;
TUNABLE_INT("hw.pci.enable_io_modes", &pci_enable_io_modes);
SYSCTL_INT(_hw_pci, OID_AUTO, enable_io_modes, CTLFLAG_RW,
    &pci_enable_io_modes, 1,
    "Enable I/O and memory bits in the config register.  Some BIOSes do not\n\
enable these bits correctly.  We'd like to do this all the time, but there\n\
are some peripherals that this causes problems with.");

static int pci_do_power_nodriver = 0;
TUNABLE_INT("hw.pci.do_power_nodriver", &pci_do_power_nodriver);
SYSCTL_INT(_hw_pci, OID_AUTO, do_power_nodriver, CTLFLAG_RW,
    &pci_do_power_nodriver, 0,
    "Place a function into D3 state when no driver attaches to it.  0 means\n\
disable.  1 means conservatively place devices into D3 state.  2 means\n\
agressively place devices into D3 state.  3 means put absolutely everything\n\
in D3 state.");

static int pci_do_power_resume = 1;
TUNABLE_INT("hw.pci.do_power_resume", &pci_do_power_resume);
SYSCTL_INT(_hw_pci, OID_AUTO, do_power_resume, CTLFLAG_RW,
    &pci_do_power_resume, 1,
    "Transition from D3 -> D0 on resume.");

static int pci_do_msi = 1;
TUNABLE_INT("hw.pci.enable_msi", &pci_do_msi);
SYSCTL_INT(_hw_pci, OID_AUTO, enable_msi, CTLFLAG_RW, &pci_do_msi, 1,
    "Enable support for MSI interrupts");

static int pci_do_msix = 1;
TUNABLE_INT("hw.pci.enable_msix", &pci_do_msix);
SYSCTL_INT(_hw_pci, OID_AUTO, enable_msix, CTLFLAG_RW, &pci_do_msix, 1,
    "Enable support for MSI-X interrupts");

static int pci_honor_msi_blacklist = 1;
TUNABLE_INT("hw.pci.honor_msi_blacklist", &pci_honor_msi_blacklist);
SYSCTL_INT(_hw_pci, OID_AUTO, honor_msi_blacklist, CTLFLAG_RD,
    &pci_honor_msi_blacklist, 1, "Honor chipset blacklist for MSI");
262
263 /* Find a device_t by bus/slot/function */
264
265 device_t
266 pci_find_bsf(uint8_t bus, uint8_t slot, uint8_t func)
267 {
268 struct pci_devinfo *dinfo;
269
270 STAILQ_FOREACH(dinfo, &pci_devq, pci_links) {
271 if ((dinfo->cfg.bus == bus) &&
272 (dinfo->cfg.slot == slot) &&
273 (dinfo->cfg.func == func)) {
274 return (dinfo->cfg.dev);
275 }
276 }
277
278 return (NULL);
279 }
280
281 /* Find a device_t by vendor/device ID */
282
283 device_t
284 pci_find_device(uint16_t vendor, uint16_t device)
285 {
286 struct pci_devinfo *dinfo;
287
288 STAILQ_FOREACH(dinfo, &pci_devq, pci_links) {
289 if ((dinfo->cfg.vendor == vendor) &&
290 (dinfo->cfg.device == device)) {
291 return (dinfo->cfg.dev);
292 }
293 }
294
295 return (NULL);
296 }
297
/*
 * Return the base address encoded in a BAR value: I/O BARs (bit 0 set)
 * keep all but the low 2 bits, memory BARs keep all but the low 4.
 */
static uint32_t
pci_mapbase(unsigned mapreg)
{

	return (mapreg & ~((mapreg & 0x01) ? 0x03 : 0x0f));
}
308
309 /* return map type of memory or port map */
310
311 static int
312 pci_maptype(unsigned mapreg)
313 {
314 static uint8_t maptype[0x10] = {
315 PCI_MAPMEM, PCI_MAPPORT,
316 PCI_MAPMEM, 0,
317 PCI_MAPMEM, PCI_MAPPORT,
318 0, 0,
319 PCI_MAPMEM|PCI_MAPMEMP, PCI_MAPPORT,
320 PCI_MAPMEM|PCI_MAPMEMP, 0,
321 PCI_MAPMEM|PCI_MAPMEMP, PCI_MAPPORT,
322 0, 0,
323 };
324
325 return maptype[mapreg & 0x0f];
326 }
327
/*
 * Return log2 of the map size decoded from a BAR sizing probe value
 * (the value read back after writing all-ones), i.e. the position of
 * the lowest set address bit.  Returns 0 for a probe value of 0.
 */
static int
pci_mapsize(unsigned testval)
{
	unsigned base;
	int ln2size = 0;

	base = pci_mapbase(testval);
	if (base != 0) {
		while ((base & 1) == 0) {
			ln2size++;
			base >>= 1;
		}
	}
	return (ln2size);
}
346
/*
 * Return log2 of the address range supported by a map register:
 * 32 for plain and 1MB-legacy-excluded encodings, 20 for below-1MB
 * memory, 64 for 64-bit memory, and 0 for reserved encodings.
 */
static int
pci_maprange(unsigned mapreg)
{
	/* Indexed by the low 3 type bits of the BAR. */
	static const int ln2range[8] = { 32, 32, 20, 0, 64, 32, 0, 0 };

	return (ln2range[mapreg & 0x07]);
}
368
369 /* adjust some values from PCI 1.0 devices to match 2.0 standards ... */
370
371 static void
372 pci_fixancient(pcicfgregs *cfg)
373 {
374 if (cfg->hdrtype != 0)
375 return;
376
377 /* PCI to PCI bridges use header type 1 */
378 if (cfg->baseclass == PCIC_BRIDGE && cfg->subclass == PCIS_BRIDGE_PCI)
379 cfg->hdrtype = 1;
380 }
381
382 /* extract header type specific config data */
383
384 static void
385 pci_hdrtypedata(device_t pcib, int b, int s, int f, pcicfgregs *cfg)
386 {
387 #define REG(n, w) PCIB_READ_CONFIG(pcib, b, s, f, n, w)
388 switch (cfg->hdrtype) {
389 case 0:
390 cfg->subvendor = REG(PCIR_SUBVEND_0, 2);
391 cfg->subdevice = REG(PCIR_SUBDEV_0, 2);
392 cfg->nummaps = PCI_MAXMAPS_0;
393 break;
394 case 1:
395 cfg->nummaps = PCI_MAXMAPS_1;
396 break;
397 case 2:
398 cfg->subvendor = REG(PCIR_SUBVEND_2, 2);
399 cfg->subdevice = REG(PCIR_SUBDEV_2, 2);
400 cfg->nummaps = PCI_MAXMAPS_2;
401 break;
402 }
403 #undef REG
404 }
405
/*
 * Read the configuration header of the function at b/s/f into a newly
 * allocated pci_devinfo of 'size' bytes (callers may over-allocate to
 * embed pci_devinfo in a larger structure), link it onto the global
 * device list, and return it.  Returns NULL if no function responds.
 */
struct pci_devinfo *
pci_read_device(device_t pcib, int b, int s, int f, size_t size)
{
#define	REG(n, w)	PCIB_READ_CONFIG(pcib, b, s, f, n, w)
	pcicfgregs *cfg = NULL;
	struct pci_devinfo *devlist_entry;
	struct devlist *devlist_head;

	devlist_head = &pci_devq;

	devlist_entry = NULL;

	/* An all-ones vendor/device word means no function is present. */
	if (REG(PCIR_DEVVENDOR, 4) != -1) {
		devlist_entry = malloc(size, M_DEVBUF, M_WAITOK | M_ZERO);
		if (devlist_entry == NULL)
			return (NULL);

		cfg = &devlist_entry->cfg;

		cfg->bus		= b;
		cfg->slot		= s;
		cfg->func		= f;
		cfg->vendor		= REG(PCIR_VENDOR, 2);
		cfg->device		= REG(PCIR_DEVICE, 2);
		cfg->cmdreg		= REG(PCIR_COMMAND, 2);
		cfg->statreg		= REG(PCIR_STATUS, 2);
		cfg->baseclass		= REG(PCIR_CLASS, 1);
		cfg->subclass		= REG(PCIR_SUBCLASS, 1);
		cfg->progif		= REG(PCIR_PROGIF, 1);
		cfg->revid		= REG(PCIR_REVID, 1);
		cfg->hdrtype		= REG(PCIR_HDRTYPE, 1);
		cfg->cachelnsz		= REG(PCIR_CACHELNSZ, 1);
		cfg->lattimer		= REG(PCIR_LATTIMER, 1);
		cfg->intpin		= REG(PCIR_INTPIN, 1);
		cfg->intline		= REG(PCIR_INTLINE, 1);

		cfg->mingnt		= REG(PCIR_MINGNT, 1);
		cfg->maxlat		= REG(PCIR_MAXLAT, 1);

		/* Split the multi-function flag out of the header type. */
		cfg->mfdev		= (cfg->hdrtype & PCIM_MFDEV) != 0;
		cfg->hdrtype		&= ~PCIM_MFDEV;

		pci_fixancient(cfg);
		pci_hdrtypedata(pcib, b, s, f, cfg);

		/* Walk the capability list only if the device claims one. */
		if (REG(PCIR_STATUS, 2) & PCIM_STATUS_CAPPRESENT)
			pci_read_extcap(pcib, cfg);

		STAILQ_INSERT_TAIL(devlist_head, devlist_entry, pci_links);

		/* Mirror the parsed header into the exported conf record. */
		devlist_entry->conf.pc_sel.pc_bus = cfg->bus;
		devlist_entry->conf.pc_sel.pc_dev = cfg->slot;
		devlist_entry->conf.pc_sel.pc_func = cfg->func;
		devlist_entry->conf.pc_hdr = cfg->hdrtype;

		devlist_entry->conf.pc_subvendor = cfg->subvendor;
		devlist_entry->conf.pc_subdevice = cfg->subdevice;
		devlist_entry->conf.pc_vendor = cfg->vendor;
		devlist_entry->conf.pc_device = cfg->device;

		devlist_entry->conf.pc_class = cfg->baseclass;
		devlist_entry->conf.pc_subclass = cfg->subclass;
		devlist_entry->conf.pc_progif = cfg->progif;
		devlist_entry->conf.pc_revid = cfg->revid;

		pci_numdevs++;
		pci_generation++;
	}
	return (devlist_entry);
#undef REG
}
479
480 static void
481 pci_read_extcap(device_t pcib, pcicfgregs *cfg)
482 {
483 #define REG(n, w) PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, w)
484 #define WREG(n, v, w) PCIB_WRITE_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, v, w)
485 #if defined(__i386__) || defined(__amd64__)
486 uint64_t addr;
487 #endif
488 uint32_t val;
489 int ptr, nextptr, ptrptr;
490
491 switch (cfg->hdrtype & PCIM_HDRTYPE) {
492 case 0:
493 case 1:
494 ptrptr = PCIR_CAP_PTR;
495 break;
496 case 2:
497 ptrptr = PCIR_CAP_PTR_2; /* cardbus capabilities ptr */
498 break;
499 default:
500 return; /* no extended capabilities support */
501 }
502 nextptr = REG(ptrptr, 1); /* sanity check? */
503
504 /*
505 * Read capability entries.
506 */
507 while (nextptr != 0) {
508 /* Sanity check */
509 if (nextptr > 255) {
510 printf("illegal PCI extended capability offset %d\n",
511 nextptr);
512 return;
513 }
514 /* Find the next entry */
515 ptr = nextptr;
516 nextptr = REG(ptr + PCICAP_NEXTPTR, 1);
517
518 /* Process this entry */
519 switch (REG(ptr + PCICAP_ID, 1)) {
520 case PCIY_PMG: /* PCI power management */
521 if (cfg->pp.pp_cap == 0) {
522 cfg->pp.pp_cap = REG(ptr + PCIR_POWER_CAP, 2);
523 cfg->pp.pp_status = ptr + PCIR_POWER_STATUS;
524 cfg->pp.pp_pmcsr = ptr + PCIR_POWER_PMCSR;
525 if ((nextptr - ptr) > PCIR_POWER_DATA)
526 cfg->pp.pp_data = ptr + PCIR_POWER_DATA;
527 }
528 break;
529 #if defined(__i386__) || defined(__amd64__)
530 case PCIY_HT: /* HyperTransport */
531 /* Determine HT-specific capability type. */
532 val = REG(ptr + PCIR_HT_COMMAND, 2);
533 switch (val & PCIM_HTCMD_CAP_MASK) {
534 case PCIM_HTCAP_MSI_MAPPING:
535 if (!(val & PCIM_HTCMD_MSI_FIXED)) {
536 /* Sanity check the mapping window. */
537 addr = REG(ptr + PCIR_HTMSI_ADDRESS_HI,
538 4);
539 addr <<= 32;
540 addr = REG(ptr + PCIR_HTMSI_ADDRESS_LO,
541 4);
542 if (addr != MSI_INTEL_ADDR_BASE)
543 device_printf(pcib,
544 "HT Bridge at %d:%d:%d has non-default MSI window 0x%llx\n",
545 cfg->bus, cfg->slot,
546 cfg->func, (long long)addr);
547 } else
548 addr = MSI_INTEL_ADDR_BASE;
549
550 cfg->ht.ht_msimap = ptr;
551 cfg->ht.ht_msictrl = val;
552 cfg->ht.ht_msiaddr = addr;
553 break;
554 }
555 break;
556 #endif
557 case PCIY_MSI: /* PCI MSI */
558 cfg->msi.msi_location = ptr;
559 cfg->msi.msi_ctrl = REG(ptr + PCIR_MSI_CTRL, 2);
560 cfg->msi.msi_msgnum = 1 << ((cfg->msi.msi_ctrl &
561 PCIM_MSICTRL_MMC_MASK)>>1);
562 break;
563 case PCIY_MSIX: /* PCI MSI-X */
564 cfg->msix.msix_location = ptr;
565 cfg->msix.msix_ctrl = REG(ptr + PCIR_MSIX_CTRL, 2);
566 cfg->msix.msix_msgnum = (cfg->msix.msix_ctrl &
567 PCIM_MSIXCTRL_TABLE_SIZE) + 1;
568 val = REG(ptr + PCIR_MSIX_TABLE, 4);
569 cfg->msix.msix_table_bar = PCIR_BAR(val &
570 PCIM_MSIX_BIR_MASK);
571 cfg->msix.msix_table_offset = val & ~PCIM_MSIX_BIR_MASK;
572 val = REG(ptr + PCIR_MSIX_PBA, 4);
573 cfg->msix.msix_pba_bar = PCIR_BAR(val &
574 PCIM_MSIX_BIR_MASK);
575 cfg->msix.msix_pba_offset = val & ~PCIM_MSIX_BIR_MASK;
576 break;
577 case PCIY_SUBVENDOR:
578 /* Should always be true. */
579 if ((cfg->hdrtype & PCIM_HDRTYPE) == 1) {
580 val = REG(ptr + PCIR_SUBVENDCAP_ID, 4);
581 cfg->subvendor = val & 0xffff;
582 cfg->subdevice = val >> 16;
583 }
584 break;
585 case PCIY_PCIX: /* PCI-X */
586 /*
587 * Assume we have a PCI-X chipset if we have
588 * at least one PCI-PCI bridge with a PCI-X
589 * capability. Note that some systems with
590 * PCI-express or HT chipsets might match on
591 * this check as well.
592 */
593 if ((cfg->hdrtype & PCIM_HDRTYPE) == 1)
594 pcix_chipset = 1;
595 break;
596 case PCIY_EXPRESS: /* PCI-express */
597 /*
598 * Assume we have a PCI-express chipset if we have
599 * at least one PCI-express device.
600 */
601 pcie_chipset = 1;
602 break;
603 default:
604 break;
605 }
606 }
607 #undef REG
608 #undef WREG
609 }
610
611 /*
612 * Return the offset in configuration space of the requested extended
613 * capability entry or 0 if the specified capability was not found.
614 */
615 int
616 pci_find_extcap_method(device_t dev, device_t child, int capability,
617 int *capreg)
618 {
619 struct pci_devinfo *dinfo = device_get_ivars(child);
620 pcicfgregs *cfg = &dinfo->cfg;
621 u_int32_t status;
622 u_int8_t ptr;
623
624 /*
625 * Check the CAP_LIST bit of the PCI status register first.
626 */
627 status = pci_read_config(child, PCIR_STATUS, 2);
628 if (!(status & PCIM_STATUS_CAPPRESENT))
629 return (ENXIO);
630
631 /*
632 * Determine the start pointer of the capabilities list.
633 */
634 switch (cfg->hdrtype & PCIM_HDRTYPE) {
635 case 0:
636 case 1:
637 ptr = PCIR_CAP_PTR;
638 break;
639 case 2:
640 ptr = PCIR_CAP_PTR_2;
641 break;
642 default:
643 /* XXX: panic? */
644 return (ENXIO); /* no extended capabilities support */
645 }
646 ptr = pci_read_config(child, ptr, 1);
647
648 /*
649 * Traverse the capabilities list.
650 */
651 while (ptr != 0) {
652 if (pci_read_config(child, ptr + PCICAP_ID, 1) == capability) {
653 if (capreg != NULL)
654 *capreg = ptr;
655 return (0);
656 }
657 ptr = pci_read_config(child, ptr + PCICAP_NEXTPTR, 1);
658 }
659
660 return (ENOENT);
661 }
662
/*
 * Support for MSI-X message interrupts.
 *
 * Program table entry 'index' with the given message address/data and
 * enable any required MSI -> HT mapping on the path to the device.
 */
void
pci_enable_msix(device_t dev, u_int index, uint64_t address, uint32_t data)
{
	struct pci_devinfo *dinfo = device_get_ivars(dev);
	struct pcicfg_msix *msix = &dinfo->cfg.msix;
	uint32_t offset;

	KASSERT(msix->msix_table_len > index, ("bogus index"));
	/* Each table entry is 16 bytes: addr lo, addr hi, data, vector ctrl. */
	offset = msix->msix_table_offset + index * 16;
	bus_write_4(msix->msix_table_res, offset, address & 0xffffffff);
	bus_write_4(msix->msix_table_res, offset + 4, address >> 32);
	bus_write_4(msix->msix_table_res, offset + 8, data);

	/* Enable MSI -> HT mapping. */
	pci_ht_map_msi(dev, address);
}
682
683 void
684 pci_mask_msix(device_t dev, u_int index)
685 {
686 struct pci_devinfo *dinfo = device_get_ivars(dev);
687 struct pcicfg_msix *msix = &dinfo->cfg.msix;
688 uint32_t offset, val;
689
690 KASSERT(msix->msix_msgnum > index, ("bogus index"));
691 offset = msix->msix_table_offset + index * 16 + 12;
692 val = bus_read_4(msix->msix_table_res, offset);
693 if (!(val & PCIM_MSIX_VCTRL_MASK)) {
694 val |= PCIM_MSIX_VCTRL_MASK;
695 bus_write_4(msix->msix_table_res, offset, val);
696 }
697 }
698
699 void
700 pci_unmask_msix(device_t dev, u_int index)
701 {
702 struct pci_devinfo *dinfo = device_get_ivars(dev);
703 struct pcicfg_msix *msix = &dinfo->cfg.msix;
704 uint32_t offset, val;
705
706 KASSERT(msix->msix_table_len > index, ("bogus index"));
707 offset = msix->msix_table_offset + index * 16 + 12;
708 val = bus_read_4(msix->msix_table_res, offset);
709 if (val & PCIM_MSIX_VCTRL_MASK) {
710 val &= ~PCIM_MSIX_VCTRL_MASK;
711 bus_write_4(msix->msix_table_res, offset, val);
712 }
713 }
714
715 int
716 pci_pending_msix(device_t dev, u_int index)
717 {
718 struct pci_devinfo *dinfo = device_get_ivars(dev);
719 struct pcicfg_msix *msix = &dinfo->cfg.msix;
720 uint32_t offset, bit;
721
722 KASSERT(msix->msix_table_len > index, ("bogus index"));
723 offset = msix->msix_pba_offset + (index / 32) * 4;
724 bit = 1 << index % 32;
725 return (bus_read_4(msix->msix_pba_res, offset) & bit);
726 }
727
/*
 * Restore MSI-X registers and table during resume.  If MSI-X is
 * enabled then walk the virtual table to restore the actual MSI-X
 * table.
 */
static void
pci_resume_msix(device_t dev)
{
	struct pci_devinfo *dinfo = device_get_ivars(dev);
	struct pcicfg_msix *msix = &dinfo->cfg.msix;
	struct msix_table_entry *mte;
	struct msix_vector *mv;
	int i;

	if (msix->msix_alloc > 0) {
		/* First, mask all vectors. */
		for (i = 0; i < msix->msix_msgnum; i++)
			pci_mask_msix(dev, i);

		/* Second, program any messages with at least one handler. */
		for (i = 0; i < msix->msix_table_len; i++) {
			mte = &msix->msix_table[i];
			if (mte->mte_vector == 0 || mte->mte_handlers == 0)
				continue;
			/* mte_vector is a 1-based index into msix_vectors. */
			mv = &msix->msix_vectors[mte->mte_vector - 1];
			pci_enable_msix(dev, i, mv->mv_address, mv->mv_data);
			pci_unmask_msix(dev, i);
		}
	}
	/* Finally, restore the saved MSI-X control register. */
	pci_write_config(dev, msix->msix_location + PCIR_MSIX_CTRL,
	    msix->msix_ctrl, 2);
}
760
/*
 * Attempt to allocate *count MSI-X messages.  The actual number allocated is
 * returned in *count.  After this function returns, each message will be
 * available to the driver as SYS_RES_IRQ resources starting at rid 1.
 *
 * Fails with ENXIO if the legacy INTx rid is in use, messages are already
 * allocated, the system blacklists MSI, or the MSI-X BARs are not mapped
 * and active; ENODEV if the device lacks MSI-X or it is disabled by tunable.
 */
int
pci_alloc_msix_method(device_t dev, device_t child, int *count)
{
	struct pci_devinfo *dinfo = device_get_ivars(child);
	pcicfgregs *cfg = &dinfo->cfg;
	struct resource_list_entry *rle;
	int actual, error, i, irq, max;

	/* Don't let count == 0 get us into trouble. */
	if (*count == 0)
		return (EINVAL);

	/* If rid 0 is allocated, then fail. */
	rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 0);
	if (rle != NULL && rle->res != NULL)
		return (ENXIO);

	/* Already have allocated messages? */
	if (cfg->msi.msi_alloc != 0 || cfg->msix.msix_alloc != 0)
		return (ENXIO);

	/* If MSI is blacklisted for this system, fail. */
	if (pci_msi_blacklisted())
		return (ENXIO);

	/* MSI-X capability present? */
	if (cfg->msix.msix_location == 0 || !pci_do_msix)
		return (ENODEV);

	/* Make sure the appropriate BARs are mapped. */
	rle = resource_list_find(&dinfo->resources, SYS_RES_MEMORY,
	    cfg->msix.msix_table_bar);
	if (rle == NULL || rle->res == NULL ||
	    !(rman_get_flags(rle->res) & RF_ACTIVE))
		return (ENXIO);
	cfg->msix.msix_table_res = rle->res;
	if (cfg->msix.msix_pba_bar != cfg->msix.msix_table_bar) {
		rle = resource_list_find(&dinfo->resources, SYS_RES_MEMORY,
		    cfg->msix.msix_pba_bar);
		if (rle == NULL || rle->res == NULL ||
		    !(rman_get_flags(rle->res) & RF_ACTIVE))
			return (ENXIO);
	}
	/* If the PBA shares the table's BAR, 'rle' still points at it. */
	cfg->msix.msix_pba_res = rle->res;

	if (bootverbose)
		device_printf(child,
		    "attempting to allocate %d MSI-X vectors (%d supported)\n",
		    *count, cfg->msix.msix_msgnum);
	max = min(*count, cfg->msix.msix_msgnum);
	for (i = 0; i < max; i++) {
		/* Allocate a message. */
		error = PCIB_ALLOC_MSIX(device_get_parent(dev), child, &irq);
		if (error)
			break;
		resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1, irq,
		    irq, 1);
	}
	/* We may get fewer than requested; report what we actually got. */
	actual = i;

	if (bootverbose) {
		rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 1);
		if (actual == 1)
			device_printf(child, "using IRQ %lu for MSI-X\n",
			    rle->start);
		else {
			int run;

			/*
			 * Be fancy and try to print contiguous runs of
			 * IRQ values as ranges.  'irq' is the previous IRQ.
			 * 'run' is true if we are in a range.
			 */
			device_printf(child, "using IRQs %lu", rle->start);
			irq = rle->start;
			run = 0;
			for (i = 1; i < actual; i++) {
				rle = resource_list_find(&dinfo->resources,
				    SYS_RES_IRQ, i + 1);

				/* Still in a run? */
				if (rle->start == irq + 1) {
					run = 1;
					irq++;
					continue;
				}

				/* Finish previous range. */
				if (run) {
					printf("-%d", irq);
					run = 0;
				}

				/* Start new range. */
				printf(",%lu", rle->start);
				irq = rle->start;
			}

			/* Unfinished range? */
			if (run)
				printf("-%d", irq);
			printf(" for MSI-X\n");
		}
	}

	/* Mask all vectors. */
	for (i = 0; i < cfg->msix.msix_msgnum; i++)
		pci_mask_msix(child, i);

	/* Allocate and initialize vector data and virtual table. */
	cfg->msix.msix_vectors = malloc(sizeof(struct msix_vector) * actual,
	    M_DEVBUF, M_WAITOK | M_ZERO);
	cfg->msix.msix_table = malloc(sizeof(struct msix_table_entry) * actual,
	    M_DEVBUF, M_WAITOK | M_ZERO);
	for (i = 0; i < actual; i++) {
		rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
		cfg->msix.msix_vectors[i].mv_irq = rle->start;
		cfg->msix.msix_table[i].mte_vector = i + 1;
	}

	/* Update control register to enable MSI-X. */
	cfg->msix.msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
	pci_write_config(child, cfg->msix.msix_location + PCIR_MSIX_CTRL,
	    cfg->msix.msix_ctrl, 2);

	/* Update counts of alloc'd messages. */
	cfg->msix.msix_alloc = actual;
	cfg->msix.msix_table_len = actual;
	*count = actual;
	return (0);
}
897
/*
 * Release all MSI-X messages previously allocated for 'child'.
 * Fails with ENODEV if none are allocated and EBUSY if any message
 * still has a handler attached or its IRQ resource is still held.
 */
static int
pci_release_msix(device_t dev, device_t child)
{
	struct pci_devinfo *dinfo = device_get_ivars(child);
	struct pcicfg_msix *msix = &dinfo->cfg.msix;
	struct resource_list_entry *rle;
	int i;

	/* Do we have any messages to release? */
	if (msix->msix_alloc == 0)
		return (ENODEV);

	/* Make sure none of the resources are allocated. */
	for (i = 0; i < msix->msix_table_len; i++) {
		if (msix->msix_table[i].mte_vector == 0)
			continue;
		if (msix->msix_table[i].mte_handlers > 0)
			return (EBUSY);
		rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
		KASSERT(rle != NULL, ("missing resource"));
		if (rle->res != NULL)
			return (EBUSY);
	}

	/* Update control register to disable MSI-X. */
	msix->msix_ctrl &= ~PCIM_MSIXCTRL_MSIX_ENABLE;
	pci_write_config(child, msix->msix_location + PCIR_MSIX_CTRL,
	    msix->msix_ctrl, 2);

	/* Free the resource list entries. */
	for (i = 0; i < msix->msix_table_len; i++) {
		if (msix->msix_table[i].mte_vector == 0)
			continue;
		resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1);
	}
	free(msix->msix_table, M_DEVBUF);
	msix->msix_table_len = 0;

	/* Release the IRQs. */
	for (i = 0; i < msix->msix_alloc; i++)
		PCIB_RELEASE_MSIX(device_get_parent(dev), child,
		    msix->msix_vectors[i].mv_irq);
	free(msix->msix_vectors, M_DEVBUF);
	msix->msix_alloc = 0;
	return (0);
}
944
945 /*
946 * Return the max supported MSI-X messages this device supports.
947 * Basically, assuming the MD code can alloc messages, this function
948 * should return the maximum value that pci_alloc_msix() can return.
949 * Thus, it is subject to the tunables, etc.
950 */
951 int
952 pci_msix_count_method(device_t dev, device_t child)
953 {
954 struct pci_devinfo *dinfo = device_get_ivars(child);
955 struct pcicfg_msix *msix = &dinfo->cfg.msix;
956
957 if (pci_do_msix && msix->msix_location != 0)
958 return (msix->msix_msgnum);
959 return (0);
960 }
961
/*
 * HyperTransport MSI mapping control
 *
 * Enable the bridge's MSI mapping window when a non-zero MSI address
 * inside the window is being programmed, and disable it when addr is 0.
 */
void
pci_ht_map_msi(device_t dev, uint64_t addr)
{
	struct pci_devinfo *dinfo = device_get_ivars(dev);
	struct pcicfg_ht *ht = &dinfo->cfg.ht;

	/* Nothing to do if this device has no HT MSI mapping capability. */
	if (!ht->ht_msimap)
		return;

	/* Window membership is compared at 1MB (>> 20) granularity. */
	if (addr && !(ht->ht_msictrl & PCIM_HTCMD_MSI_ENABLE) &&
	    ht->ht_msiaddr >> 20 == addr >> 20) {
		/* Enable MSI -> HT mapping. */
		ht->ht_msictrl |= PCIM_HTCMD_MSI_ENABLE;
		pci_write_config(dev, ht->ht_msimap + PCIR_HT_COMMAND,
		    ht->ht_msictrl, 2);
	}

	if (!addr && ht->ht_msictrl & PCIM_HTCMD_MSI_ENABLE) {
		/* Disable MSI -> HT mapping. */
		ht->ht_msictrl &= ~PCIM_HTCMD_MSI_ENABLE;
		pci_write_config(dev, ht->ht_msimap + PCIR_HT_COMMAND,
		    ht->ht_msictrl, 2);
	}
}
989
/*
 * Support for MSI message signalled interrupts.
 *
 * Program the MSI address/data registers and set the enable bit in the
 * MSI control register.  The high address word and the placement of the
 * data register depend on whether the capability is 64-bit capable.
 */
void
pci_enable_msi(device_t dev, uint64_t address, uint16_t data)
{
	struct pci_devinfo *dinfo = device_get_ivars(dev);
	struct pcicfg_msi *msi = &dinfo->cfg.msi;

	/* Write data and address values. */
	pci_write_config(dev, msi->msi_location + PCIR_MSI_ADDR,
	    address & 0xffffffff, 4);
	if (msi->msi_ctrl & PCIM_MSICTRL_64BIT) {
		pci_write_config(dev, msi->msi_location + PCIR_MSI_ADDR_HIGH,
		    address >> 32, 4);
		pci_write_config(dev, msi->msi_location + PCIR_MSI_DATA_64BIT,
		    data, 2);
	} else
		pci_write_config(dev, msi->msi_location + PCIR_MSI_DATA, data,
		    2);

	/* Enable MSI in the control register. */
	msi->msi_ctrl |= PCIM_MSICTRL_MSI_ENABLE;
	pci_write_config(dev, msi->msi_location + PCIR_MSI_CTRL, msi->msi_ctrl,
	    2);

	/* Enable MSI -> HT mapping. */
	pci_ht_map_msi(dev, address);
}
1019
/*
 * Clear the MSI enable bit in the device's MSI capability and tear
 * down any HyperTransport MSI mapping that was set up for it.
 */
void
pci_disable_msi(device_t dev)
{
	struct pci_devinfo *dinfo = device_get_ivars(dev);
	struct pcicfg_msi *msi = &dinfo->cfg.msi;

	/* Disable MSI -> HT mapping (addr == 0 means "unmap"). */
	pci_ht_map_msi(dev, 0);

	/* Disable MSI in the control register. */
	msi->msi_ctrl &= ~PCIM_MSICTRL_MSI_ENABLE;
	pci_write_config(dev, msi->msi_location + PCIR_MSI_CTRL, msi->msi_ctrl,
	    2);
}
1034
1035 /*
1036 * Restore MSI registers during resume. If MSI is enabled then
1037 * restore the data and address registers in addition to the control
1038 * register.
1039 */
static void
pci_resume_msi(device_t dev)
{
	struct pci_devinfo *dinfo = device_get_ivars(dev);
	struct pcicfg_msi *msi = &dinfo->cfg.msi;
	uint64_t address;
	uint16_t data;

	/*
	 * Only rewrite address/data if MSI was enabled before suspend;
	 * the values come from the shadow copy saved in the softc.
	 */
	if (msi->msi_ctrl & PCIM_MSICTRL_MSI_ENABLE) {
		address = msi->msi_addr;
		data = msi->msi_data;
		pci_write_config(dev, msi->msi_location + PCIR_MSI_ADDR,
		    address & 0xffffffff, 4);
		if (msi->msi_ctrl & PCIM_MSICTRL_64BIT) {
			pci_write_config(dev, msi->msi_location +
			    PCIR_MSI_ADDR_HIGH, address >> 32, 4);
			pci_write_config(dev, msi->msi_location +
			    PCIR_MSI_DATA_64BIT, data, 2);
		} else
			pci_write_config(dev, msi->msi_location + PCIR_MSI_DATA,
			    data, 2);
	}
	/* The control register is restored unconditionally. */
	pci_write_config(dev, msi->msi_location + PCIR_MSI_CTRL, msi->msi_ctrl,
	    2);
}
1065
/*
 * Re-route a single MSI or MSI-X IRQ: ask the parent bridge for fresh
 * address/data values for 'irq' and reprogram the device with them.
 * Returns 0 on success for MSI, an error from PCIB_MAP_MSI(), or
 * ENOENT when the IRQ is not one of this device's messages.
 */
int
pci_remap_msi_irq(device_t dev, u_int irq)
{
	struct pci_devinfo *dinfo = device_get_ivars(dev);
	pcicfgregs *cfg = &dinfo->cfg;
	struct resource_list_entry *rle;
	struct msix_table_entry *mte;
	struct msix_vector *mv;
	device_t bus;
	uint64_t addr;
	uint32_t data;
	int error, i, j;

	bus = device_get_parent(dev);

	/*
	 * Handle MSI first.  We try to find this IRQ among our list
	 * of MSI IRQs.  If we find it, we request updated address and
	 * data registers and apply the results.
	 */
	if (cfg->msi.msi_alloc > 0) {

		/* If we don't have any active handlers, nothing to do. */
		if (cfg->msi.msi_handlers == 0)
			return (0);
		for (i = 0; i < cfg->msi.msi_alloc; i++) {
			rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ,
			    i + 1);
			if (rle->start == irq) {
				error = PCIB_MAP_MSI(device_get_parent(bus),
				    dev, irq, &addr, &data);
				if (error)
					return (error);
				/* Disable, update the shadow copy, re-enable. */
				pci_disable_msi(dev);
				dinfo->cfg.msi.msi_addr = addr;
				dinfo->cfg.msi.msi_data = data;
				pci_enable_msi(dev, addr, data);
				return (0);
			}
		}
		return (ENOENT);
	}

	/*
	 * For MSI-X, we check to see if we have this IRQ.  If we do,
	 * we request the updated mapping info.  If that works, we go
	 * through all the slots that use this IRQ and update them.
	 */
	if (cfg->msix.msix_alloc > 0) {
		for (i = 0; i < cfg->msix.msix_alloc; i++) {
			mv = &cfg->msix.msix_vectors[i];
			if (mv->mv_irq == irq) {
				error = PCIB_MAP_MSI(device_get_parent(bus),
				    dev, irq, &addr, &data);
				if (error)
					return (error);
				mv->mv_address = addr;
				mv->mv_data = data;
				/* Reprogram every table slot using this vector,
				 * masking around the update. */
				for (j = 0; j < cfg->msix.msix_table_len; j++) {
					mte = &cfg->msix.msix_table[j];
					if (mte->mte_vector != i + 1)
						continue;
					if (mte->mte_handlers == 0)
						continue;
					pci_mask_msix(dev, j);
					pci_enable_msix(dev, j, addr, data);
					pci_unmask_msix(dev, j);
				}
			}
		}
		/*
		 * NOTE(review): unlike the MSI path, a successful MSI-X
		 * remap still falls through to ENOENT here — confirm
		 * whether callers rely on this before changing it.
		 */
		return (ENOENT);
	}

	return (ENOENT);
}
1141
1142 /*
1143 * Returns true if the specified device is blacklisted because MSI
1144 * doesn't work.
1145 */
1146 int
1147 pci_msi_device_blacklisted(device_t dev)
1148 {
1149 struct pci_quirk *q;
1150
1151 if (!pci_honor_msi_blacklist)
1152 return (0);
1153
1154 for (q = &pci_quirks[0]; q->devid; q++) {
1155 if (q->devid == pci_get_devid(dev) &&
1156 q->type == PCI_QUIRK_DISABLE_MSI)
1157 return (1);
1158 }
1159 return (0);
1160 }
1161
1162 /*
 * Determine if MSI is blacklisted globally on this system.  Currently,
1164 * we just check for blacklisted chipsets as represented by the
1165 * host-PCI bridge at device 0:0:0. In the future, it may become
1166 * necessary to check other system attributes, such as the kenv values
1167 * that give the motherboard manufacturer and model number.
1168 */
1169 static int
1170 pci_msi_blacklisted(void)
1171 {
1172 device_t dev;
1173
1174 if (!pci_honor_msi_blacklist)
1175 return (0);
1176
1177 /* Blacklist all non-PCI-express and non-PCI-X chipsets. */
1178 if (!(pcie_chipset || pcix_chipset))
1179 return (1);
1180
1181 dev = pci_find_bsf(0, 0, 0);
1182 if (dev != NULL)
1183 return (pci_msi_device_blacklisted(dev));
1184 return (0);
1185 }
1186
1187 /*
1188 * Attempt to allocate *count MSI messages. The actual number allocated is
1189 * returned in *count. After this function returns, each message will be
1190 * available to the driver as SYS_RES_IRQ resources starting at a rid 1.
1191 */
int
pci_alloc_msi_method(device_t dev, device_t child, int *count)
{
	struct pci_devinfo *dinfo = device_get_ivars(child);
	pcicfgregs *cfg = &dinfo->cfg;
	struct resource_list_entry *rle;
	int actual, error, i, irqs[32];
	uint16_t ctrl;

	/* Don't let count == 0 get us into trouble. */
	if (*count == 0)
		return (EINVAL);

	/* If rid 0 is allocated, then fail. */
	rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 0);
	if (rle != NULL && rle->res != NULL)
		return (ENXIO);

	/* Already have allocated messages? */
	if (cfg->msi.msi_alloc != 0 || cfg->msix.msix_alloc != 0)
		return (ENXIO);

	/* If MSI is blacklisted for this system, fail. */
	if (pci_msi_blacklisted())
		return (ENXIO);

	/* MSI capability present? */
	if (cfg->msi.msi_location == 0 || !pci_do_msi)
		return (ENODEV);

	if (bootverbose)
		device_printf(child,
		    "attempting to allocate %d MSI vectors (%d supported)\n",
		    *count, cfg->msi.msi_msgnum);

	/* Don't ask for more than the device supports. */
	actual = min(*count, cfg->msi.msi_msgnum);

	/* Don't ask for more than 32 messages. */
	actual = min(actual, 32);

	/* MSI requires power of 2 number of messages. */
	if (!powerof2(actual))
		return (EINVAL);

	/*
	 * Keep halving the request until the parent bridge can satisfy
	 * it; give up only when even a single message fails.
	 */
	for (;;) {
		/* Try to allocate N messages. */
		error = PCIB_ALLOC_MSI(device_get_parent(dev), child, actual,
		    cfg->msi.msi_msgnum, irqs);
		if (error == 0)
			break;
		if (actual == 1)
			return (error);

		/* Try N / 2. */
		actual >>= 1;
	}

	/*
	 * We now have N actual messages mapped onto SYS_RES_IRQ
	 * resources in the irqs[] array, so add new resources
	 * starting at rid 1.
	 */
	for (i = 0; i < actual; i++)
		resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1,
		    irqs[i], irqs[i], 1);

	if (bootverbose) {
		if (actual == 1)
			device_printf(child, "using IRQ %d for MSI\n", irqs[0]);
		else {
			int run;

			/*
			 * Be fancy and try to print contiguous runs
			 * of IRQ values as ranges.  'run' is true if
			 * we are in a range.
			 */
			device_printf(child, "using IRQs %d", irqs[0]);
			run = 0;
			for (i = 1; i < actual; i++) {

				/* Still in a run? */
				if (irqs[i] == irqs[i - 1] + 1) {
					run = 1;
					continue;
				}

				/* Finish previous range. */
				if (run) {
					printf("-%d", irqs[i - 1]);
					run = 0;
				}

				/* Start new range. */
				printf(",%d", irqs[i]);
			}

			/* Unfinished range? */
			if (run)
				printf("-%d", irqs[actual - 1]);
			printf(" for MSI\n");
		}
	}

	/* Update control register with actual count (log2, MME field). */
	ctrl = cfg->msi.msi_ctrl;
	ctrl &= ~PCIM_MSICTRL_MME_MASK;
	ctrl |= (ffs(actual) - 1) << 4;
	cfg->msi.msi_ctrl = ctrl;
	pci_write_config(child, cfg->msi.msi_location + PCIR_MSI_CTRL, ctrl, 2);

	/* Update counts of alloc'd messages. */
	cfg->msi.msi_alloc = actual;
	cfg->msi.msi_handlers = 0;
	*count = actual;
	return (0);
}
1310
1311 /* Release the MSI messages associated with this device. */
/* Release the MSI messages associated with this device. */
int
pci_release_msi_method(device_t dev, device_t child)
{
	struct pci_devinfo *dinfo = device_get_ivars(child);
	struct pcicfg_msi *msi = &dinfo->cfg.msi;
	struct resource_list_entry *rle;
	int error, i, irqs[32];

	/* Try MSI-X first; ENODEV means "no MSI-X allocated", fall through. */
	error = pci_release_msix(dev, child);
	if (error != ENODEV)
		return (error);

	/* Do we have any messages to release? */
	if (msi->msi_alloc == 0)
		return (ENODEV);
	KASSERT(msi->msi_alloc <= 32, ("more than 32 alloc'd messages"));

	/* Make sure none of the resources are allocated. */
	if (msi->msi_handlers > 0)
		return (EBUSY);
	for (i = 0; i < msi->msi_alloc; i++) {
		rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
		KASSERT(rle != NULL, ("missing MSI resource"));
		if (rle->res != NULL)
			return (EBUSY);
		irqs[i] = rle->start;
	}

	/* Update control register with 0 count. */
	KASSERT(!(msi->msi_ctrl & PCIM_MSICTRL_MSI_ENABLE),
	    ("%s: MSI still enabled", __func__));
	msi->msi_ctrl &= ~PCIM_MSICTRL_MME_MASK;
	pci_write_config(child, msi->msi_location + PCIR_MSI_CTRL,
	    msi->msi_ctrl, 2);

	/* Release the messages. */
	PCIB_RELEASE_MSI(device_get_parent(dev), child, msi->msi_alloc, irqs);
	for (i = 0; i < msi->msi_alloc; i++)
		resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1);

	/* Update alloc count. */
	msi->msi_alloc = 0;
	msi->msi_addr = 0;
	msi->msi_data = 0;
	return (0);
}
1359
1360 /*
1361 * Return the max supported MSI messages this device supports.
1362 * Basically, assuming the MD code can alloc messages, this function
1363 * should return the maximum value that pci_alloc_msi() can return.
1364 * Thus, it is subject to the tunables, etc.
1365 */
1366 int
1367 pci_msi_count_method(device_t dev, device_t child)
1368 {
1369 struct pci_devinfo *dinfo = device_get_ivars(child);
1370 struct pcicfg_msi *msi = &dinfo->cfg.msi;
1371
1372 if (pci_do_msi && msi->msi_location != 0)
1373 return (msi->msi_msgnum);
1374 return (0);
1375 }
1376
1377 /* free pcicfgregs structure and all depending data structures */
1378
1379 int
1380 pci_freecfg(struct pci_devinfo *dinfo)
1381 {
1382 struct devlist *devlist_head;
1383
1384 devlist_head = &pci_devq;
1385
1386 STAILQ_REMOVE(devlist_head, dinfo, pci_devinfo, pci_links);
1387 free(dinfo, M_DEVBUF);
1388
1389 /* increment the generation count */
1390 pci_generation++;
1391
1392 /* we're losing one device */
1393 pci_numdevs--;
1394 return (0);
1395 }
1396
1397 /*
 * PCI power management
1399 */
1400 int
1401 pci_set_powerstate_method(device_t dev, device_t child, int state)
1402 {
1403 struct pci_devinfo *dinfo = device_get_ivars(child);
1404 pcicfgregs *cfg = &dinfo->cfg;
1405 uint16_t status;
1406 int result, oldstate, highest, delay;
1407
1408 if (cfg->pp.pp_cap == 0)
1409 return (EOPNOTSUPP);
1410
1411 /*
1412 * Optimize a no state change request away. While it would be OK to
1413 * write to the hardware in theory, some devices have shown odd
1414 * behavior when going from D3 -> D3.
1415 */
1416 oldstate = pci_get_powerstate(child);
1417 if (oldstate == state)
1418 return (0);
1419
1420 /*
1421 * The PCI power management specification states that after a state
1422 * transition between PCI power states, system software must
1423 * guarantee a minimal delay before the function accesses the device.
1424 * Compute the worst case delay that we need to guarantee before we
1425 * access the device. Many devices will be responsive much more
1426 * quickly than this delay, but there are some that don't respond
1427 * instantly to state changes. Transitions to/from D3 state require
1428 * 10ms, while D2 requires 200us, and D0/1 require none. The delay
1429 * is done below with DELAY rather than a sleeper function because
1430 * this function can be called from contexts where we cannot sleep.
1431 */
1432 highest = (oldstate > state) ? oldstate : state;
1433 if (highest == PCI_POWERSTATE_D3)
1434 delay = 10000;
1435 else if (highest == PCI_POWERSTATE_D2)
1436 delay = 200;
1437 else
1438 delay = 0;
1439 status = PCI_READ_CONFIG(dev, child, cfg->pp.pp_status, 2)
1440 & ~PCIM_PSTAT_DMASK;
1441 result = 0;
1442 switch (state) {
1443 case PCI_POWERSTATE_D0:
1444 status |= PCIM_PSTAT_D0;
1445 break;
1446 case PCI_POWERSTATE_D1:
1447 if ((cfg->pp.pp_cap & PCIM_PCAP_D1SUPP) == 0)
1448 return (EOPNOTSUPP);
1449 status |= PCIM_PSTAT_D1;
1450 break;
1451 case PCI_POWERSTATE_D2:
1452 if ((cfg->pp.pp_cap & PCIM_PCAP_D2SUPP) == 0)
1453 return (EOPNOTSUPP);
1454 status |= PCIM_PSTAT_D2;
1455 break;
1456 case PCI_POWERSTATE_D3:
1457 status |= PCIM_PSTAT_D3;
1458 break;
1459 default:
1460 return (EINVAL);
1461 }
1462
1463 if (bootverbose)
1464 printf(
1465 "pci%d:%d:%d: Transition from D%d to D%d\n",
1466 dinfo->cfg.bus, dinfo->cfg.slot, dinfo->cfg.func,
1467 oldstate, state);
1468
1469 PCI_WRITE_CONFIG(dev, child, cfg->pp.pp_status, status, 2);
1470 if (delay)
1471 DELAY(delay);
1472 return (0);
1473 }
1474
1475 int
1476 pci_get_powerstate_method(device_t dev, device_t child)
1477 {
1478 struct pci_devinfo *dinfo = device_get_ivars(child);
1479 pcicfgregs *cfg = &dinfo->cfg;
1480 uint16_t status;
1481 int result;
1482
1483 if (cfg->pp.pp_cap != 0) {
1484 status = PCI_READ_CONFIG(dev, child, cfg->pp.pp_status, 2);
1485 switch (status & PCIM_PSTAT_DMASK) {
1486 case PCIM_PSTAT_D0:
1487 result = PCI_POWERSTATE_D0;
1488 break;
1489 case PCIM_PSTAT_D1:
1490 result = PCI_POWERSTATE_D1;
1491 break;
1492 case PCIM_PSTAT_D2:
1493 result = PCI_POWERSTATE_D2;
1494 break;
1495 case PCIM_PSTAT_D3:
1496 result = PCI_POWERSTATE_D3;
1497 break;
1498 default:
1499 result = PCI_POWERSTATE_UNKNOWN;
1500 break;
1501 }
1502 } else {
1503 /* No support, device is always at D0 */
1504 result = PCI_POWERSTATE_D0;
1505 }
1506 return (result);
1507 }
1508
1509 /*
1510 * Some convenience functions for PCI device drivers.
1511 */
1512
1513 static __inline void
1514 pci_set_command_bit(device_t dev, device_t child, uint16_t bit)
1515 {
1516 uint16_t command;
1517
1518 command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2);
1519 command |= bit;
1520 PCI_WRITE_CONFIG(dev, child, PCIR_COMMAND, command, 2);
1521 }
1522
1523 static __inline void
1524 pci_clear_command_bit(device_t dev, device_t child, uint16_t bit)
1525 {
1526 uint16_t command;
1527
1528 command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2);
1529 command &= ~bit;
1530 PCI_WRITE_CONFIG(dev, child, PCIR_COMMAND, command, 2);
1531 }
1532
/*
 * Turn on the bus-master enable bit so the child may initiate DMA.
 * Always succeeds.
 */
int
pci_enable_busmaster_method(device_t dev, device_t child)
{
	pci_set_command_bit(dev, child, PCIM_CMD_BUSMASTEREN);
	return (0);
}
1539
/*
 * Turn off the bus-master enable bit, stopping the child from
 * initiating DMA.  Always succeeds.
 */
int
pci_disable_busmaster_method(device_t dev, device_t child)
{
	pci_clear_command_bit(dev, child, PCIM_CMD_BUSMASTEREN);
	return (0);
}
1546
1547 int
1548 pci_enable_io_method(device_t dev, device_t child, int space)
1549 {
1550 uint16_t command;
1551 uint16_t bit;
1552 char *error;
1553
1554 bit = 0;
1555 error = NULL;
1556
1557 switch(space) {
1558 case SYS_RES_IOPORT:
1559 bit = PCIM_CMD_PORTEN;
1560 error = "port";
1561 break;
1562 case SYS_RES_MEMORY:
1563 bit = PCIM_CMD_MEMEN;
1564 error = "memory";
1565 break;
1566 default:
1567 return (EINVAL);
1568 }
1569 pci_set_command_bit(dev, child, bit);
1570 /* Some devices seem to need a brief stall here, what do to? */
1571 command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2);
1572 if (command & bit)
1573 return (0);
1574 device_printf(child, "failed to enable %s mapping!\n", error);
1575 return (ENXIO);
1576 }
1577
1578 int
1579 pci_disable_io_method(device_t dev, device_t child, int space)
1580 {
1581 uint16_t command;
1582 uint16_t bit;
1583 char *error;
1584
1585 bit = 0;
1586 error = NULL;
1587
1588 switch(space) {
1589 case SYS_RES_IOPORT:
1590 bit = PCIM_CMD_PORTEN;
1591 error = "port";
1592 break;
1593 case SYS_RES_MEMORY:
1594 bit = PCIM_CMD_MEMEN;
1595 error = "memory";
1596 break;
1597 default:
1598 return (EINVAL);
1599 }
1600 pci_clear_command_bit(dev, child, bit);
1601 command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2);
1602 if (command & bit) {
1603 device_printf(child, "failed to disable %s mapping!\n", error);
1604 return (ENXIO);
1605 }
1606 return (0);
1607 }
1608
1609 /*
1610 * New style pci driver. Parent device is either a pci-host-bridge or a
1611 * pci-pci-bridge. Both kinds are represented by instances of pcib.
1612 */
1613
/*
 * Dump the interesting fields of a newly-found device's config header
 * (and its PM/MSI/MSI-X capabilities, when present) to the console.
 * Only active under bootverbose.
 */
void
pci_print_verbose(struct pci_devinfo *dinfo)
{
	if (bootverbose) {
		pcicfgregs *cfg = &dinfo->cfg;

		printf("found->\tvendor=0x%04x, dev=0x%04x, revid=0x%02x\n",
		    cfg->vendor, cfg->device, cfg->revid);
		printf("\tbus=%d, slot=%d, func=%d\n",
		    cfg->bus, cfg->slot, cfg->func);
		printf("\tclass=%02x-%02x-%02x, hdrtype=0x%02x, mfdev=%d\n",
		    cfg->baseclass, cfg->subclass, cfg->progif, cfg->hdrtype,
		    cfg->mfdev);
		printf("\tcmdreg=0x%04x, statreg=0x%04x, cachelnsz=%d (dwords)\n",
		    cfg->cmdreg, cfg->statreg, cfg->cachelnsz);
		printf("\tlattimer=0x%02x (%d ns), mingnt=0x%02x (%d ns), maxlat=0x%02x (%d ns)\n",
		    cfg->lattimer, cfg->lattimer * 30, cfg->mingnt,
		    cfg->mingnt * 250, cfg->maxlat, cfg->maxlat * 250);
		if (cfg->intpin > 0)
			printf("\tintpin=%c, irq=%d\n",
			    cfg->intpin +'a' -1, cfg->intline);
		if (cfg->pp.pp_cap) {
			uint16_t status;

			/* Current power state is read live from hardware. */
			status = pci_read_config(cfg->dev, cfg->pp.pp_status, 2);
			printf("\tpowerspec %d supports D0%s%s D3 current D%d\n",
			    cfg->pp.pp_cap & PCIM_PCAP_SPEC,
			    cfg->pp.pp_cap & PCIM_PCAP_D1SUPP ? " D1" : "",
			    cfg->pp.pp_cap & PCIM_PCAP_D2SUPP ? " D2" : "",
			    status & PCIM_PSTAT_DMASK);
		}
		if (cfg->msi.msi_location) {
			int ctrl;

			ctrl = cfg->msi.msi_ctrl;
			printf("\tMSI supports %d message%s%s%s\n",
			    cfg->msi.msi_msgnum,
			    (cfg->msi.msi_msgnum == 1) ? "" : "s",
			    (ctrl & PCIM_MSICTRL_64BIT) ? ", 64 bit" : "",
			    (ctrl & PCIM_MSICTRL_VECTOR) ? ", vector masks":"");
		}
		if (cfg->msix.msix_location) {
			printf("\tMSI-X supports %d message%s ",
			    cfg->msix.msix_msgnum,
			    (cfg->msix.msix_msgnum == 1) ? "" : "s");
			/* Table and PBA may share one BAR or use two. */
			if (cfg->msix.msix_table_bar == cfg->msix.msix_pba_bar)
				printf("in map 0x%x\n",
				    cfg->msix.msix_table_bar);
			else
				printf("in maps 0x%x and 0x%x\n",
				    cfg->msix.msix_table_bar,
				    cfg->msix.msix_pba_bar);
		}
	}
}
1669
1670 static int
1671 pci_porten(device_t pcib, int b, int s, int f)
1672 {
1673 return (PCIB_READ_CONFIG(pcib, b, s, f, PCIR_COMMAND, 2)
1674 & PCIM_CMD_PORTEN) != 0;
1675 }
1676
1677 static int
1678 pci_memen(device_t pcib, int b, int s, int f)
1679 {
1680 return (PCIB_READ_CONFIG(pcib, b, s, f, PCIR_COMMAND, 2)
1681 & PCIM_CMD_MEMEN) != 0;
1682 }
1683
1684 /*
1685 * Add a resource based on a pci map register. Return 1 if the map
1686 * register is a 32bit map register or 2 if it is a 64bit register.
1687 */
1688 static int
1689 pci_add_map(device_t pcib, device_t bus, device_t dev,
1690 int b, int s, int f, int reg, struct resource_list *rl, int force,
1691 int prefetch)
1692 {
1693 uint32_t map;
1694 uint64_t base;
1695 uint64_t start, end, count;
1696 uint8_t ln2size;
1697 uint8_t ln2range;
1698 uint32_t testval;
1699 uint16_t cmd;
1700 int type;
1701 int barlen;
1702 struct resource *res;
1703
1704 map = PCIB_READ_CONFIG(pcib, b, s, f, reg, 4);
1705 PCIB_WRITE_CONFIG(pcib, b, s, f, reg, 0xffffffff, 4);
1706 testval = PCIB_READ_CONFIG(pcib, b, s, f, reg, 4);
1707 PCIB_WRITE_CONFIG(pcib, b, s, f, reg, map, 4);
1708
1709 if (pci_maptype(map) & PCI_MAPMEM)
1710 type = SYS_RES_MEMORY;
1711 else
1712 type = SYS_RES_IOPORT;
1713 ln2size = pci_mapsize(testval);
1714 ln2range = pci_maprange(testval);
1715 base = pci_mapbase(map);
1716 barlen = ln2range == 64 ? 2 : 1;
1717
1718 /*
1719 * For I/O registers, if bottom bit is set, and the next bit up
1720 * isn't clear, we know we have a BAR that doesn't conform to the
1721 * spec, so ignore it. Also, sanity check the size of the data
1722 * areas to the type of memory involved. Memory must be at least
1723 * 16 bytes in size, while I/O ranges must be at least 4.
1724 */
1725 if ((testval & 0x1) == 0x1 &&
1726 (testval & 0x2) != 0)
1727 return (barlen);
1728 if ((type == SYS_RES_MEMORY && ln2size < 4) ||
1729 (type == SYS_RES_IOPORT && ln2size < 2))
1730 return (barlen);
1731
1732 if (ln2range == 64)
1733 /* Read the other half of a 64bit map register */
1734 base |= (uint64_t) PCIB_READ_CONFIG(pcib, b, s, f, reg + 4, 4) << 32;
1735
1736 if (bootverbose) {
1737 printf("\tmap[%02x]: type %x, range %2d, base %08x, size %2d",
1738 reg, pci_maptype(map), ln2range,
1739 (unsigned int) base, ln2size);
1740 if (type == SYS_RES_IOPORT && !pci_porten(pcib, b, s, f))
1741 printf(", port disabled\n");
1742 else if (type == SYS_RES_MEMORY && !pci_memen(pcib, b, s, f))
1743 printf(", memory disabled\n");
1744 else
1745 printf(", enabled\n");
1746 }
1747
1748 /*
1749 * If base is 0, then we have problems. It is best to ignore
1750 * such entries for the moment. These will be allocated later if
1751 * the driver specifically requests them. However, some
1752 * removable busses look better when all resources are allocated,
1753 * so allow '' to be overriden.
1754 *
1755 * Similarly treat maps whose values is the same as the test value
1756 * read back. These maps have had all f's written to them by the
1757 * BIOS in an attempt to disable the resources.
1758 */
1759 if (!force && (base == 0 || map == testval))
1760 return (barlen);
1761
1762 /*
1763 * This code theoretically does the right thing, but has
1764 * undesirable side effects in some cases where peripherals
1765 * respond oddly to having these bits enabled. Let the user
1766 * be able to turn them off (since pci_enable_io_modes is 1 by
1767 * default).
1768 */
1769 if (pci_enable_io_modes) {
1770 /* Turn on resources that have been left off by a lazy BIOS */
1771 if (type == SYS_RES_IOPORT && !pci_porten(pcib, b, s, f)) {
1772 cmd = PCIB_READ_CONFIG(pcib, b, s, f, PCIR_COMMAND, 2);
1773 cmd |= PCIM_CMD_PORTEN;
1774 PCIB_WRITE_CONFIG(pcib, b, s, f, PCIR_COMMAND, cmd, 2);
1775 }
1776 if (type == SYS_RES_MEMORY && !pci_memen(pcib, b, s, f)) {
1777 cmd = PCIB_READ_CONFIG(pcib, b, s, f, PCIR_COMMAND, 2);
1778 cmd |= PCIM_CMD_MEMEN;
1779 PCIB_WRITE_CONFIG(pcib, b, s, f, PCIR_COMMAND, cmd, 2);
1780 }
1781 } else {
1782 if (type == SYS_RES_IOPORT && !pci_porten(pcib, b, s, f))
1783 return (barlen);
1784 if (type == SYS_RES_MEMORY && !pci_memen(pcib, b, s, f))
1785 return (barlen);
1786 }
1787
1788 count = 1 << ln2size;
1789 if (base == 0 || base == pci_mapbase(testval)) {
1790 start = 0; /* Let the parent decide. */
1791 end = ~0ULL;
1792 } else {
1793 start = base;
1794 end = base + (1 << ln2size) - 1;
1795 }
1796 resource_list_add(rl, type, reg, start, end, count);
1797
1798 /*
1799 * Try to allocate the resource for this BAR from our parent
1800 * so that this resource range is already reserved. The
1801 * driver for this device will later inherit this resource in
1802 * pci_alloc_resource().
1803 */
1804 res = resource_list_alloc(rl, bus, dev, type, ®, start, end, count,
1805 prefetch ? RF_PREFETCHABLE : 0);
1806 if (res == NULL) {
1807 /*
1808 * If the allocation fails, clear the BAR and delete
1809 * the resource list entry to force
1810 * pci_alloc_resource() to allocate resources from the
1811 * parent.
1812 */
1813 resource_list_delete(rl, type, reg);
1814 start = 0;
1815 } else
1816 start = rman_get_start(res);
1817 pci_write_config(dev, reg, start, 4);
1818 if (ln2range == 64)
1819 pci_write_config(dev, reg + 4, start >> 32, 4);
1820 return (barlen);
1821 }
1822
1823 /*
1824 * For ATA devices we need to decide early what addressing mode to use.
1825 * Legacy demands that the primary and secondary ATA ports sits on the
1826 * same addresses that old ISA hardware did. This dictates that we use
1827 * those addresses and ignore the BAR's if we cannot set PCI native
1828 * addressing mode.
1829 */
static void
pci_ata_maps(device_t pcib, device_t bus, device_t dev, int b,
    int s, int f, struct resource_list *rl, int force, uint32_t prefetchmask)
{
	int rid, type, progif;
#if 0
	/* if this device supports PCI native addressing use it */
	progif = pci_read_config(dev, PCIR_PROGIF, 1);
	if ((progif & 0x8a) == 0x8a) {
		if (pci_mapbase(pci_read_config(dev, PCIR_BAR(0), 4)) &&
		    pci_mapbase(pci_read_config(dev, PCIR_BAR(2), 4))) {
			printf("Trying ATA native PCI addressing mode\n");
			pci_write_config(dev, PCIR_PROGIF, progif | 0x05, 1);
		}
	}
#endif
	progif = pci_read_config(dev, PCIR_PROGIF, 1);
	type = SYS_RES_IOPORT;
	/* Primary channel: native mode uses BARs 0/1, legacy uses 0x1f0/0x3f6. */
	if (progif & PCIP_STORAGE_IDE_MODEPRIM) {
		pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(0), rl, force,
		    prefetchmask & (1 << 0));
		pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(1), rl, force,
		    prefetchmask & (1 << 1));
	} else {
		rid = PCIR_BAR(0);
		resource_list_add(rl, type, rid, 0x1f0, 0x1f7, 8);
		resource_list_alloc(rl, bus, dev, type, &rid, 0x1f0, 0x1f7, 8,
		    0);
		rid = PCIR_BAR(1);
		resource_list_add(rl, type, rid, 0x3f6, 0x3f6, 1);
		resource_list_alloc(rl, bus, dev, type, &rid, 0x3f6, 0x3f6, 1,
		    0);
	}
	/* Secondary channel: native mode uses BARs 2/3, legacy 0x170/0x376. */
	if (progif & PCIP_STORAGE_IDE_MODESEC) {
		pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(2), rl, force,
		    prefetchmask & (1 << 2));
		pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(3), rl, force,
		    prefetchmask & (1 << 3));
	} else {
		rid = PCIR_BAR(2);
		resource_list_add(rl, type, rid, 0x170, 0x177, 8);
		resource_list_alloc(rl, bus, dev, type, &rid, 0x170, 0x177, 8,
		    0);
		rid = PCIR_BAR(3);
		resource_list_add(rl, type, rid, 0x376, 0x376, 1);
		resource_list_alloc(rl, bus, dev, type, &rid, 0x376, 0x376, 1,
		    0);
	}
	/* Bus-master DMA (and any extra) BARs are always probed normally. */
	pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(4), rl, force,
	    prefetchmask & (1 << 4));
	pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(5), rl, force,
	    prefetchmask & (1 << 5));
}
1883
/*
 * Work out which IRQ a device's INTx pin should use (tunable override,
 * bus routing, or the intline register), program the intline register
 * if it changed, and register the IRQ as the rid-0 interrupt resource.
 */
static void
pci_assign_interrupt(device_t bus, device_t dev, int force_route)
{
	struct pci_devinfo *dinfo = device_get_ivars(dev);
	pcicfgregs *cfg = &dinfo->cfg;
	char tunable_name[64];
	int irq;

	/* Has to have an intpin to have an interrupt. */
	if (cfg->intpin == 0)
		return;

	/* Let the user override the IRQ with a tunable. */
	irq = PCI_INVALID_IRQ;
	snprintf(tunable_name, sizeof(tunable_name), "hw.pci%d.%d.INT%c.irq",
	    cfg->bus, cfg->slot, cfg->intpin + 'A' - 1);
	if (TUNABLE_INT_FETCH(tunable_name, &irq) && (irq >= 255 || irq <= 0))
		irq = PCI_INVALID_IRQ;

	/*
	 * If we didn't get an IRQ via the tunable, then we either use the
	 * IRQ value in the intline register or we ask the bus to route an
	 * interrupt for us.  If force_route is true, then we only use the
	 * value in the intline register if the bus was unable to assign an
	 * IRQ.
	 */
	if (!PCI_INTERRUPT_VALID(irq)) {
		if (!PCI_INTERRUPT_VALID(cfg->intline) || force_route)
			irq = PCI_ASSIGN_INTERRUPT(bus, dev);
		if (!PCI_INTERRUPT_VALID(irq))
			irq = cfg->intline;
	}

	/* If after all that we don't have an IRQ, just bail. */
	if (!PCI_INTERRUPT_VALID(irq))
		return;

	/* Update the config register if it changed. */
	if (irq != cfg->intline) {
		cfg->intline = irq;
		pci_write_config(dev, PCIR_INTLINE, irq, 1);
	}

	/* Add this IRQ as rid 0 interrupt resource. */
	resource_list_add(&dinfo->resources, SYS_RES_IRQ, 0, irq, irq, 1);
}
1930
/*
 * Populate the device's resource list from its BARs (with special
 * handling for legacy ATA controllers), apply quirk-added map
 * registers, and assign its INTx interrupt.
 */
void
pci_add_resources(device_t bus, device_t dev, int force, uint32_t prefetchmask)
{
	device_t pcib;
	struct pci_devinfo *dinfo = device_get_ivars(dev);
	pcicfgregs *cfg = &dinfo->cfg;
	struct resource_list *rl = &dinfo->resources;
	struct pci_quirk *q;
	int b, i, f, s;

	pcib = device_get_parent(bus);

	b = cfg->bus;
	s = cfg->slot;
	f = cfg->func;

	/* ATA devices needs special map treatment */
	if ((pci_get_class(dev) == PCIC_STORAGE) &&
	    (pci_get_subclass(dev) == PCIS_STORAGE_IDE) &&
	    ((pci_get_progif(dev) & PCIP_STORAGE_IDE_MASTERDEV) ||
	    (!pci_read_config(dev, PCIR_BAR(0), 4) &&
	    !pci_read_config(dev, PCIR_BAR(2), 4))) )
		pci_ata_maps(pcib, bus, dev, b, s, f, rl, force, prefetchmask);
	else
		/* pci_add_map() returns 1 or 2 dwords consumed per BAR. */
		for (i = 0; i < cfg->nummaps;)
			i += pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(i),
			    rl, force, prefetchmask & (1 << i));

	/*
	 * Add additional, quirked resources.
	 */
	for (q = &pci_quirks[0]; q->devid; q++) {
		if (q->devid == ((cfg->device << 16) | cfg->vendor)
		    && q->type == PCI_QUIRK_MAP_REG)
			pci_add_map(pcib, bus, dev, b, s, f, q->arg1, rl,
			    force, 0);
	}

	if (cfg->intpin > 0 && PCI_INTERRUPT_VALID(cfg->intline)) {
#if defined(__ia64__) || defined(__i386__) || defined(__amd64__) || \
    defined(__arm__) || defined(__alpha__)
		/*
		 * Try to re-route interrupts.  Sometimes the BIOS or
		 * firmware may leave bogus values in these registers.
		 * If the re-route fails, then just stick with what we
		 * have.
		 */
		pci_assign_interrupt(bus, dev, 1);
#else
		pci_assign_interrupt(bus, dev, 0);
#endif
	}
}
1984
/*
 * Scan every slot/function on bus 'busno' via the parent bridge and
 * add a child device for each function that responds with a valid
 * config header.
 */
void
pci_add_children(device_t dev, int busno, size_t dinfo_size)
{
#define	REG(n, w)	PCIB_READ_CONFIG(pcib, busno, s, f, n, w)
	device_t pcib = device_get_parent(dev);
	struct pci_devinfo *dinfo;
	int maxslots;
	int s, f, pcifunchigh;
	uint8_t hdrtype;

	KASSERT(dinfo_size >= sizeof(struct pci_devinfo),
	    ("dinfo_size too small"));
	maxslots = PCIB_MAXSLOTS(pcib);
	for (s = 0; s <= maxslots; s++) {
		pcifunchigh = 0;
		f = 0;
		DELAY(1);
		hdrtype = REG(PCIR_HDRTYPE, 1);
		/* Skip slots whose function 0 reports a bogus header type. */
		if ((hdrtype & PCIM_HDRTYPE) > PCI_MAXHDRTYPE)
			continue;
		/* Only scan functions 1-7 on multi-function devices. */
		if (hdrtype & PCIM_MFDEV)
			pcifunchigh = PCI_FUNCMAX;
		for (f = 0; f <= pcifunchigh; f++) {
			dinfo = pci_read_device(pcib, busno, s, f, dinfo_size);
			if (dinfo != NULL) {
				pci_add_child(dev, dinfo);
			}
		}
	}
#undef REG
}
2016
/*
 * Create the newbus child for a discovered PCI function, save and
 * restore its config state (which powers it up to D0 if needed), then
 * add its resources.
 */
void
pci_add_child(device_t bus, struct pci_devinfo *dinfo)
{
	dinfo->cfg.dev = device_add_child(bus, NULL, -1);
	device_set_ivars(dinfo->cfg.dev, dinfo);
	resource_list_init(&dinfo->resources);
	/* Save first so there is a known-good copy, then restore from it. */
	pci_cfg_save(dinfo->cfg.dev, dinfo, 0);
	pci_cfg_restore(dinfo->cfg.dev, dinfo);
	pci_print_verbose(dinfo);
	pci_add_resources(bus, dinfo->cfg.dev, 0, 0);
}
2028
static int
pci_probe(device_t dev)
{

	device_set_desc(dev, "PCI bus");

	/* Allow other subclasses to override this driver. */
	return (-1000);
}
2038
static int
pci_attach(device_t dev)
{
	int busno;

	/*
	 * Since there can be multiple independently numbered PCI
	 * busses on some large alpha systems, we can't use the unit
	 * number to decide what bus we are probing.  We ask the parent
	 * pcib what our bus number is.
	 */
	busno = pcib_get_bus(dev);
	if (bootverbose)
		device_printf(dev, "physical bus=%d\n", busno);

	pci_add_children(dev, busno, sizeof(struct pci_devinfo));

	return (bus_generic_attach(dev));
}
2058
int
pci_suspend(device_t dev)
{
	int dstate, error, i, numdevs;
	device_t acpi_dev, child, *devlist;
	struct pci_devinfo *dinfo;

	/*
	 * Save the PCI configuration space for each child and set the
	 * device in the appropriate power state for this sleep state.
	 */
	acpi_dev = NULL;
	if (pci_do_power_resume)
		acpi_dev = devclass_get_device(devclass_find("acpi"), 0);
	device_get_children(dev, &devlist, &numdevs);
	for (i = 0; i < numdevs; i++) {
		child = devlist[i];
		dinfo = (struct pci_devinfo *) device_get_ivars(child);
		pci_cfg_save(child, dinfo, 0);
	}

	/* Suspend devices before potentially powering them down. */
	error = bus_generic_suspend(dev);
	if (error) {
		free(devlist, M_TEMP);
		return (error);
	}

	/*
	 * Always set the device to D3.  If ACPI suggests a different
	 * power state, use it instead.  If ACPI is not present, the
	 * firmware is responsible for managing device power.  Skip
	 * children who aren't attached since they are powered down
	 * separately.  Only manage type 0 devices for now.
	 */
	for (i = 0; acpi_dev && i < numdevs; i++) {
		child = devlist[i];
		dinfo = (struct pci_devinfo *) device_get_ivars(child);
		if (device_is_attached(child) && dinfo->cfg.hdrtype == 0) {
			dstate = PCI_POWERSTATE_D3;
			/* ACPI may override the target state via _SxD. */
			ACPI_PWR_FOR_SLEEP(acpi_dev, child, &dstate);
			pci_set_powerstate(child, dstate);
		}
	}
	free(devlist, M_TEMP);
	return (0);
}
2106
/*
 * Resume the bus: bring each child back to D0 (when ACPI power
 * management is enabled), restore its saved PCI configuration space,
 * then resume the children.
 */
int
pci_resume(device_t dev)
{
	int i, numdevs;
	device_t acpi_dev, child, *devlist;
	struct pci_devinfo *dinfo;

	/*
	 * Set each child to D0 and restore its PCI configuration space.
	 */
	acpi_dev = NULL;
	if (pci_do_power_resume)
		acpi_dev = devclass_get_device(devclass_find("acpi"), 0);
	device_get_children(dev, &devlist, &numdevs);
	for (i = 0; i < numdevs; i++) {
		/*
		 * Notify ACPI we're going to D0 but ignore the result.  If
		 * ACPI is not present, the firmware is responsible for
		 * managing device power.  Only manage type 0 devices for now.
		 */
		child = devlist[i];
		dinfo = (struct pci_devinfo *) device_get_ivars(child);
		if (acpi_dev && device_is_attached(child) &&
		    dinfo->cfg.hdrtype == 0) {
			ACPI_PWR_FOR_SLEEP(acpi_dev, child, NULL);
			pci_set_powerstate(child, PCI_POWERSTATE_D0);
		}

		/* Now the device is powered up, restore its config space. */
		pci_cfg_restore(child, dinfo);
	}
	free(devlist, M_TEMP);
	return (bus_generic_resume(dev));
}
2141
2142 static void
2143 pci_load_vendor_data(void)
2144 {
2145 caddr_t vendordata, info;
2146
2147 if ((vendordata = preload_search_by_type("pci_vendor_data")) != NULL) {
2148 info = preload_search_info(vendordata, MODINFO_ADDR);
2149 pci_vendordata = *(char **)info;
2150 info = preload_search_info(vendordata, MODINFO_SIZE);
2151 pci_vendordata_size = *(size_t *)info;
2152 /* terminate the database */
2153 pci_vendordata[pci_vendordata_size] = '\n';
2154 }
2155 }
2156
2157 void
2158 pci_driver_added(device_t dev, driver_t *driver)
2159 {
2160 int numdevs;
2161 device_t *devlist;
2162 device_t child;
2163 struct pci_devinfo *dinfo;
2164 int i;
2165
2166 if (bootverbose)
2167 device_printf(dev, "driver added\n");
2168 DEVICE_IDENTIFY(driver, dev);
2169 device_get_children(dev, &devlist, &numdevs);
2170 for (i = 0; i < numdevs; i++) {
2171 child = devlist[i];
2172 if (device_get_state(child) != DS_NOTPRESENT)
2173 continue;
2174 dinfo = device_get_ivars(child);
2175 pci_print_verbose(dinfo);
2176 if (bootverbose)
2177 printf("pci%d:%d:%d: reprobing on driver added\n",
2178 dinfo->cfg.bus, dinfo->cfg.slot, dinfo->cfg.func);
2179 pci_cfg_restore(child, dinfo);
2180 if (device_probe_and_attach(child) != 0)
2181 pci_cfg_save(child, dinfo, 1);
2182 }
2183 free(devlist, M_TEMP);
2184 }
2185
/*
 * Set up an interrupt handler for a child.  For direct children using
 * MSI/MSI-X (rid > 0), additionally ask the parent bridge to map the
 * message and program the device's MSI/MSI-X registers on first use.
 */
int
pci_setup_intr(device_t dev, device_t child, struct resource *irq, int flags,
    driver_intr_t *intr, void *arg, void **cookiep)
{
	struct pci_devinfo *dinfo;
	struct msix_table_entry *mte;
	struct msix_vector *mv;
	uint64_t addr;
	uint32_t data;
	void *cookie;
	int error, rid;

	/* Hook the handler up first; it is torn down below on failure. */
	error = bus_generic_setup_intr(dev, child, irq, flags, intr, arg,
	    &cookie);
	if (error)
		return (error);

	/*
	 * If this is a direct child, check to see if the interrupt is
	 * MSI or MSI-X.  If so, ask our parent to map the MSI and give
	 * us the address and data register values.  If we fail for some
	 * reason, teardown the interrupt handler.
	 */
	rid = rman_get_rid(irq);
	if (device_get_parent(child) == dev && rid > 0) {
		dinfo = device_get_ivars(child);
		if (dinfo->cfg.msi.msi_alloc > 0) {
			/*
			 * MSI: on the first handler, map the vectors and
			 * enable MSI; afterwards just count handlers.
			 */
			if (dinfo->cfg.msi.msi_addr == 0) {
				KASSERT(dinfo->cfg.msi.msi_handlers == 0,
				    ("MSI has handlers, but vectors not mapped"));
				error = PCIB_MAP_MSI(device_get_parent(dev),
				    child, rman_get_start(irq), &addr, &data);
				if (error)
					goto bad;
				dinfo->cfg.msi.msi_addr = addr;
				dinfo->cfg.msi.msi_data = data;
				pci_enable_msi(child, addr, data);
			}
			dinfo->cfg.msi.msi_handlers++;
		} else {
			/* MSI-X: rid N corresponds to table entry N - 1. */
			KASSERT(dinfo->cfg.msix.msix_alloc > 0,
			    ("No MSI or MSI-X interrupts allocated"));
			KASSERT(rid <= dinfo->cfg.msix.msix_table_len,
			    ("MSI-X index too high"));
			mte = &dinfo->cfg.msix.msix_table[rid - 1];
			KASSERT(mte->mte_vector != 0, ("no message vector"));
			mv = &dinfo->cfg.msix.msix_vectors[mte->mte_vector - 1];
			KASSERT(mv->mv_irq == rman_get_start(irq),
			    ("IRQ mismatch"));
			if (mv->mv_address == 0) {
				KASSERT(mte->mte_handlers == 0,
				    ("MSI-X table entry has handlers, but vector not mapped"));
				error = PCIB_MAP_MSI(device_get_parent(dev),
				    child, rman_get_start(irq), &addr, &data);
				if (error)
					goto bad;
				mv->mv_address = addr;
				mv->mv_data = data;
			}
			/* Program and unmask the entry on its first handler. */
			if (mte->mte_handlers == 0) {
				pci_enable_msix(child, rid - 1, mv->mv_address,
				    mv->mv_data);
				pci_unmask_msix(child, rid - 1);
			}
			mte->mte_handlers++;
		}

		/* NB: also reached with error == 0 on the success path. */
	bad:
		if (error) {
			(void)bus_generic_teardown_intr(dev, child, irq,
			    cookie);
			return (error);
		}
	}
	*cookiep = cookie;
	return (0);
}
2262
/*
 * Tear down an interrupt handler.  For direct children using MSI or
 * MSI-X, drop the handler reference count and disable MSI / mask the
 * MSI-X message once the count reaches zero.
 */
int
pci_teardown_intr(device_t dev, device_t child, struct resource *irq,
    void *cookie)
{
	struct msix_table_entry *mte;
	struct resource_list_entry *rle;
	struct pci_devinfo *dinfo;
	int error, rid;

	/*
	 * If this is a direct child, check to see if the interrupt is
	 * MSI or MSI-X.  If so, decrement the appropriate handlers
	 * count and mask the MSI-X message, or disable MSI messages
	 * if the count drops to 0.
	 */
	if (irq == NULL || !(rman_get_flags(irq) & RF_ACTIVE))
		return (EINVAL);
	rid = rman_get_rid(irq);
	if (device_get_parent(child) == dev && rid > 0) {
		dinfo = device_get_ivars(child);
		/* The resource must be the one recorded for this rid. */
		rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, rid);
		if (rle->res != irq)
			return (EINVAL);
		if (dinfo->cfg.msi.msi_alloc > 0) {
			/*
			 * NOTE(review): this branch handles plain MSI, so
			 * the message likely should read "MSI index too
			 * high" — confirm against later FreeBSD revisions.
			 */
			KASSERT(rid <= dinfo->cfg.msi.msi_alloc,
			    ("MSI-X index too high"));
			if (dinfo->cfg.msi.msi_handlers == 0)
				return (EINVAL);
			dinfo->cfg.msi.msi_handlers--;
			if (dinfo->cfg.msi.msi_handlers == 0)
				pci_disable_msi(child);
		} else {
			KASSERT(dinfo->cfg.msix.msix_alloc > 0,
			    ("No MSI or MSI-X interrupts allocated"));
			KASSERT(rid <= dinfo->cfg.msix.msix_table_len,
			    ("MSI-X index too high"));
			mte = &dinfo->cfg.msix.msix_table[rid - 1];
			if (mte->mte_handlers == 0)
				return (EINVAL);
			mte->mte_handlers--;
			if (mte->mte_handlers == 0)
				pci_mask_msix(child, rid - 1);
		}
	}
	error = bus_generic_teardown_intr(dev, child, irq, cookie);
	/* The counts above were already adjusted, so this must succeed. */
	if (device_get_parent(child) == dev && rid > 0)
		KASSERT(error == 0,
		    ("%s: generic teardown failed for MSI/MSI-X", __func__));
	return (error);
}
2313
2314 int
2315 pci_print_child(device_t dev, device_t child)
2316 {
2317 struct pci_devinfo *dinfo;
2318 struct resource_list *rl;
2319 int retval = 0;
2320
2321 dinfo = device_get_ivars(child);
2322 rl = &dinfo->resources;
2323
2324 retval += bus_print_child_header(dev, child);
2325
2326 retval += resource_list_print_type(rl, "port", SYS_RES_IOPORT, "%#lx");
2327 retval += resource_list_print_type(rl, "mem", SYS_RES_MEMORY, "%#lx");
2328 retval += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%ld");
2329 if (device_get_flags(dev))
2330 retval += printf(" flags %#x", device_get_flags(dev));
2331
2332 retval += printf(" at device %d.%d", pci_get_slot(child),
2333 pci_get_function(child));
2334
2335 retval += bus_print_child_footer(dev, child);
2336
2337 return (retval);
2338 }
2339
/*
 * Table mapping PCI class/subclass codes to human-readable
 * descriptions, used by pci_probe_nomatch() when no driver attaches
 * to a device.  A subclass of -1 gives the generic description for
 * the whole class.  The table is terminated by a NULL desc entry.
 */
static struct
{
	int	class;
	int	subclass;
	char	*desc;
} pci_nomatch_tab[] = {
	{PCIC_OLD,		-1,			"old"},
	{PCIC_OLD,		PCIS_OLD_NONVGA,	"non-VGA display device"},
	{PCIC_OLD,		PCIS_OLD_VGA,		"VGA-compatible display device"},
	{PCIC_STORAGE,		-1,			"mass storage"},
	{PCIC_STORAGE,		PCIS_STORAGE_SCSI,	"SCSI"},
	{PCIC_STORAGE,		PCIS_STORAGE_IDE,	"ATA"},
	{PCIC_STORAGE,		PCIS_STORAGE_FLOPPY,	"floppy disk"},
	{PCIC_STORAGE,		PCIS_STORAGE_IPI,	"IPI"},
	{PCIC_STORAGE,		PCIS_STORAGE_RAID,	"RAID"},
	{PCIC_NETWORK,		-1,			"network"},
	{PCIC_NETWORK,		PCIS_NETWORK_ETHERNET,	"ethernet"},
	{PCIC_NETWORK,		PCIS_NETWORK_TOKENRING,	"token ring"},
	{PCIC_NETWORK,		PCIS_NETWORK_FDDI,	"fddi"},
	{PCIC_NETWORK,		PCIS_NETWORK_ATM,	"ATM"},
	{PCIC_NETWORK,		PCIS_NETWORK_ISDN,	"ISDN"},
	{PCIC_DISPLAY,		-1,			"display"},
	{PCIC_DISPLAY,		PCIS_DISPLAY_VGA,	"VGA"},
	{PCIC_DISPLAY,		PCIS_DISPLAY_XGA,	"XGA"},
	{PCIC_DISPLAY,		PCIS_DISPLAY_3D,	"3D"},
	{PCIC_MULTIMEDIA,	-1,			"multimedia"},
	{PCIC_MULTIMEDIA,	PCIS_MULTIMEDIA_VIDEO,	"video"},
	{PCIC_MULTIMEDIA,	PCIS_MULTIMEDIA_AUDIO,	"audio"},
	{PCIC_MULTIMEDIA,	PCIS_MULTIMEDIA_TELE,	"telephony"},
	{PCIC_MEMORY,		-1,			"memory"},
	{PCIC_MEMORY,		PCIS_MEMORY_RAM,	"RAM"},
	{PCIC_MEMORY,		PCIS_MEMORY_FLASH,	"flash"},
	{PCIC_BRIDGE,		-1,			"bridge"},
	{PCIC_BRIDGE,		PCIS_BRIDGE_HOST,	"HOST-PCI"},
	{PCIC_BRIDGE,		PCIS_BRIDGE_ISA,	"PCI-ISA"},
	{PCIC_BRIDGE,		PCIS_BRIDGE_EISA,	"PCI-EISA"},
	{PCIC_BRIDGE,		PCIS_BRIDGE_MCA,	"PCI-MCA"},
	{PCIC_BRIDGE,		PCIS_BRIDGE_PCI,	"PCI-PCI"},
	{PCIC_BRIDGE,		PCIS_BRIDGE_PCMCIA,	"PCI-PCMCIA"},
	{PCIC_BRIDGE,		PCIS_BRIDGE_NUBUS,	"PCI-NuBus"},
	{PCIC_BRIDGE,		PCIS_BRIDGE_CARDBUS,	"PCI-CardBus"},
	{PCIC_BRIDGE,		PCIS_BRIDGE_RACEWAY,	"PCI-RACEway"},
	{PCIC_SIMPLECOMM,	-1,			"simple comms"},
	{PCIC_SIMPLECOMM,	PCIS_SIMPLECOMM_UART,	"UART"},	/* could detect 16550 */
	{PCIC_SIMPLECOMM,	PCIS_SIMPLECOMM_PAR,	"parallel port"},
	{PCIC_SIMPLECOMM,	PCIS_SIMPLECOMM_MULSER,	"multiport serial"},
	{PCIC_SIMPLECOMM,	PCIS_SIMPLECOMM_MODEM,	"generic modem"},
	{PCIC_BASEPERIPH,	-1,			"base peripheral"},
	{PCIC_BASEPERIPH,	PCIS_BASEPERIPH_PIC,	"interrupt controller"},
	{PCIC_BASEPERIPH,	PCIS_BASEPERIPH_DMA,	"DMA controller"},
	{PCIC_BASEPERIPH,	PCIS_BASEPERIPH_TIMER,	"timer"},
	{PCIC_BASEPERIPH,	PCIS_BASEPERIPH_RTC,	"realtime clock"},
	{PCIC_BASEPERIPH,	PCIS_BASEPERIPH_PCIHOT,	"PCI hot-plug controller"},
	{PCIC_INPUTDEV,		-1,			"input device"},
	{PCIC_INPUTDEV,		PCIS_INPUTDEV_KEYBOARD,	"keyboard"},
	{PCIC_INPUTDEV,		PCIS_INPUTDEV_DIGITIZER,"digitizer"},
	{PCIC_INPUTDEV,		PCIS_INPUTDEV_MOUSE,	"mouse"},
	{PCIC_INPUTDEV,		PCIS_INPUTDEV_SCANNER,	"scanner"},
	{PCIC_INPUTDEV,		PCIS_INPUTDEV_GAMEPORT,	"gameport"},
	{PCIC_DOCKING,		-1,			"docking station"},
	{PCIC_PROCESSOR,	-1,			"processor"},
	{PCIC_SERIALBUS,	-1,			"serial bus"},
	{PCIC_SERIALBUS,	PCIS_SERIALBUS_FW,	"FireWire"},
	{PCIC_SERIALBUS,	PCIS_SERIALBUS_ACCESS,	"AccessBus"},
	{PCIC_SERIALBUS,	PCIS_SERIALBUS_SSA,	"SSA"},
	{PCIC_SERIALBUS,	PCIS_SERIALBUS_USB,	"USB"},
	{PCIC_SERIALBUS,	PCIS_SERIALBUS_FC,	"Fibre Channel"},
	{PCIC_SERIALBUS,	PCIS_SERIALBUS_SMBUS,	"SMBus"},
	{PCIC_WIRELESS,		-1,			"wireless controller"},
	{PCIC_WIRELESS,		PCIS_WIRELESS_IRDA,	"iRDA"},
	{PCIC_WIRELESS,		PCIS_WIRELESS_IR,	"IR"},
	{PCIC_WIRELESS,		PCIS_WIRELESS_RF,	"RF"},
	{PCIC_INTELLIIO,	-1,			"intelligent I/O controller"},
	{PCIC_INTELLIIO,	PCIS_INTELLIIO_I2O,	"I2O"},
	{PCIC_SATCOM,		-1,			"satellite communication"},
	{PCIC_SATCOM,		PCIS_SATCOM_TV,		"sat TV"},
	{PCIC_SATCOM,		PCIS_SATCOM_AUDIO,	"sat audio"},
	{PCIC_SATCOM,		PCIS_SATCOM_VOICE,	"sat voice"},
	{PCIC_SATCOM,		PCIS_SATCOM_DATA,	"sat data"},
	{PCIC_CRYPTO,		-1,			"encrypt/decrypt"},
	{PCIC_CRYPTO,		PCIS_CRYPTO_NETCOMP,	"network/computer crypto"},
	{PCIC_CRYPTO,		PCIS_CRYPTO_ENTERTAIN,	"entertainment crypto"},
	{PCIC_DASP,		-1,			"dasp"},
	{PCIC_DASP,		PCIS_DASP_DPIO,		"DPIO module"},
	{0, 0,		NULL}
};
2426
2427 void
2428 pci_probe_nomatch(device_t dev, device_t child)
2429 {
2430 int i;
2431 char *cp, *scp, *device;
2432
2433 /*
2434 * Look for a listing for this device in a loaded device database.
2435 */
2436 if ((device = pci_describe_device(child)) != NULL) {
2437 device_printf(dev, "<%s>", device);
2438 free(device, M_DEVBUF);
2439 } else {
2440 /*
2441 * Scan the class/subclass descriptions for a general
2442 * description.
2443 */
2444 cp = "unknown";
2445 scp = NULL;
2446 for (i = 0; pci_nomatch_tab[i].desc != NULL; i++) {
2447 if (pci_nomatch_tab[i].class == pci_get_class(child)) {
2448 if (pci_nomatch_tab[i].subclass == -1) {
2449 cp = pci_nomatch_tab[i].desc;
2450 } else if (pci_nomatch_tab[i].subclass ==
2451 pci_get_subclass(child)) {
2452 scp = pci_nomatch_tab[i].desc;
2453 }
2454 }
2455 }
2456 device_printf(dev, "<%s%s%s>",
2457 cp ? cp : "",
2458 ((cp != NULL) && (scp != NULL)) ? ", " : "",
2459 scp ? scp : "");
2460 }
2461 printf(" at device %d.%d (no driver attached)\n",
2462 pci_get_slot(child), pci_get_function(child));
2463 if (pci_do_power_nodriver)
2464 pci_cfg_save(child,
2465 (struct pci_devinfo *) device_get_ivars(child), 1);
2466 return;
2467 }
2468
2469 /*
2470 * Parse the PCI device database, if loaded, and return a pointer to a
2471 * description of the device.
2472 *
2473 * The database is flat text formatted as follows:
2474 *
2475 * Any line not in a valid format is ignored.
2476 * Lines are terminated with newline '\n' characters.
2477 *
2478 * A VENDOR line consists of the 4 digit (hex) vendor code, a TAB, then
2479 * the vendor name.
2480 *
2481 * A DEVICE line is entered immediately below the corresponding VENDOR ID.
2482 * - devices cannot be listed without a corresponding VENDOR line.
2483 * A DEVICE line consists of a TAB, the 4 digit (hex) device code,
2484 * another TAB, then the device name.
2485 */
2486
2487 /*
2488 * Assuming (ptr) points to the beginning of a line in the database,
2489 * return the vendor or device and description of the next entry.
2490 * The value of (vendor) or (device) inappropriate for the entry type
2491 * is set to -1. Returns nonzero at the end of the database.
2492 *
2493 * Note that this is slightly unrobust in the face of corrupt data;
2494 * we attempt to safeguard against this by spamming the end of the
2495 * database with a newline when we initialise.
2496 */
/*
 * Parse one entry from the vendor database starting at *ptr.  On
 * return *vendor or *device holds the parsed code (the one that does
 * not apply is -1) and *desc the description.  *ptr is advanced past
 * the consumed line.  Returns nonzero at end of database.
 */
static int
pci_describe_parse_line(char **ptr, int *vendor, int *device, char **desc)
{
	char *cp = *ptr;
	int left;

	*device = -1;
	*vendor = -1;
	**desc = '\0';
	for (;;) {
		/* Bytes remaining in the database from the cursor. */
		left = pci_vendordata_size - (cp - pci_vendordata);
		if (left <= 0) {
			*ptr = cp;
			return(1);
		}

		/* vendor entry?  (vendor lines do not start with a tab) */
		if (*cp != '\t' &&
		    sscanf(cp, "%x\t%80[^\n]", vendor, *desc) == 2)
			break;
		/* device entry?  (device lines are tab-indented) */
		if (*cp == '\t' &&
		    sscanf(cp, "%x\t%80[^\n]", device, *desc) == 2)
			break;

		/* skip to next line */
		while (*cp != '\n' && left > 0) {
			cp++;
			left--;
		}
		if (*cp == '\n') {
			cp++;
			left--;
		}
	}
	/* skip to next line (past the entry just parsed) */
	while (*cp != '\n' && left > 0) {
		cp++;
		left--;
	}
	if (*cp == '\n' && left > 0)
		cp++;
	*ptr = cp;
	return(0);
}
2542
/*
 * Look the device up in the loaded vendor database and return a
 * malloc'd "vendor, device" description, or NULL if no database is
 * loaded, the vendor is unknown, or allocation fails.  The caller
 * frees the result with free(..., M_DEVBUF).
 */
static char *
pci_describe_device(device_t dev)
{
	int vendor, device;
	char *desc, *vp, *dp, *line;

	desc = vp = dp = NULL;

	/*
	 * If we have no vendor data, we can't do anything.
	 */
	if (pci_vendordata == NULL)
		goto out;

	/*
	 * Scan the vendor data looking for this device
	 */
	line = pci_vendordata;
	if ((vp = malloc(80, M_DEVBUF, M_NOWAIT)) == NULL)
		goto out;
	for (;;) {
		if (pci_describe_parse_line(&line, &vendor, &device, &vp))
			goto out;
		if (vendor == pci_get_vendor(dev))
			break;
	}
	if ((dp = malloc(80, M_DEVBUF, M_NOWAIT)) == NULL)
		goto out;
	/*
	 * Scan this vendor's device lines; stop at end of database or
	 * at the next vendor line (device not listed).
	 */
	for (;;) {
		if (pci_describe_parse_line(&line, &vendor, &device, &dp)) {
			*dp = 0;
			break;
		}
		if (vendor != -1) {
			*dp = 0;
			break;
		}
		if (device == pci_get_device(dev))
			break;
	}
	/* Unknown device: fall back to the numeric device ID. */
	if (dp[0] == '\0')
		snprintf(dp, 80, "0x%x", pci_get_device(dev));
	if ((desc = malloc(strlen(vp) + strlen(dp) + 3, M_DEVBUF, M_NOWAIT)) !=
	    NULL)
		sprintf(desc, "%s, %s", vp, dp);
 out:
	if (vp != NULL)
		free(vp, M_DEVBUF);
	if (dp != NULL)
		free(dp, M_DEVBUF);
	return(desc);
}
2595
/*
 * Bus ivar read accessor: report cached configuration-space values
 * from the child's devinfo.  Returns 0 on success, EINVAL for
 * PCI_IVAR_ETHADDR (not supported here) and ENOENT for unknown ivars.
 */
int
pci_read_ivar(device_t dev, device_t child, int which, uintptr_t *result)
{
	struct pci_devinfo *dinfo;
	pcicfgregs *cfg;

	dinfo = device_get_ivars(child);
	cfg = &dinfo->cfg;

	switch (which) {
	case PCI_IVAR_ETHADDR:
		/*
		 * The generic accessor doesn't deal with failure, so
		 * we set the return value, then return an error.
		 */
		*((uint8_t **) result) = NULL;
		return (EINVAL);
	case PCI_IVAR_SUBVENDOR:
		*result = cfg->subvendor;
		break;
	case PCI_IVAR_SUBDEVICE:
		*result = cfg->subdevice;
		break;
	case PCI_IVAR_VENDOR:
		*result = cfg->vendor;
		break;
	case PCI_IVAR_DEVICE:
		*result = cfg->device;
		break;
	case PCI_IVAR_DEVID:
		/* Combined device/vendor ID, device in the high word. */
		*result = (cfg->device << 16) | cfg->vendor;
		break;
	case PCI_IVAR_CLASS:
		*result = cfg->baseclass;
		break;
	case PCI_IVAR_SUBCLASS:
		*result = cfg->subclass;
		break;
	case PCI_IVAR_PROGIF:
		*result = cfg->progif;
		break;
	case PCI_IVAR_REVID:
		*result = cfg->revid;
		break;
	case PCI_IVAR_INTPIN:
		*result = cfg->intpin;
		break;
	case PCI_IVAR_IRQ:
		*result = cfg->intline;
		break;
	case PCI_IVAR_BUS:
		*result = cfg->bus;
		break;
	case PCI_IVAR_SLOT:
		*result = cfg->slot;
		break;
	case PCI_IVAR_FUNCTION:
		*result = cfg->func;
		break;
	case PCI_IVAR_CMDREG:
		*result = cfg->cmdreg;
		break;
	case PCI_IVAR_CACHELNSZ:
		*result = cfg->cachelnsz;
		break;
	case PCI_IVAR_MINGNT:
		*result = cfg->mingnt;
		break;
	case PCI_IVAR_MAXLAT:
		*result = cfg->maxlat;
		break;
	case PCI_IVAR_LATTIMER:
		*result = cfg->lattimer;
		break;
	default:
		return (ENOENT);
	}
	return (0);
}
2675
2676 int
2677 pci_write_ivar(device_t dev, device_t child, int which, uintptr_t value)
2678 {
2679 struct pci_devinfo *dinfo;
2680
2681 dinfo = device_get_ivars(child);
2682
2683 switch (which) {
2684 case PCI_IVAR_INTPIN:
2685 dinfo->cfg.intpin = value;
2686 return (0);
2687 case PCI_IVAR_ETHADDR:
2688 case PCI_IVAR_SUBVENDOR:
2689 case PCI_IVAR_SUBDEVICE:
2690 case PCI_IVAR_VENDOR:
2691 case PCI_IVAR_DEVICE:
2692 case PCI_IVAR_DEVID:
2693 case PCI_IVAR_CLASS:
2694 case PCI_IVAR_SUBCLASS:
2695 case PCI_IVAR_PROGIF:
2696 case PCI_IVAR_REVID:
2697 case PCI_IVAR_IRQ:
2698 case PCI_IVAR_BUS:
2699 case PCI_IVAR_SLOT:
2700 case PCI_IVAR_FUNCTION:
2701 return (EINVAL); /* disallow for now */
2702
2703 default:
2704 return (ENOENT);
2705 }
2706 }
2707
2708
2709 #include "opt_ddb.h"
2710 #ifdef DDB
2711 #include <ddb/ddb.h>
2712 #include <sys/cons.h>
2713
2714 /*
2715 * List resources based on pci map registers, used for within ddb
2716 */
2717
/*
 * DDB "show pciregs" command: walk the global PCI device queue and
 * print one summary line per device (driver name/unit, location,
 * class, subsystem and device IDs, revision and header type), with
 * simple paging.
 */
DB_SHOW_COMMAND(pciregs, db_pci_dump)
{
	struct pci_devinfo *dinfo;
	struct devlist *devlist_head;
	struct pci_conf *p;
	const char *name;
	int i, error, none_count, quit;

	/* Counter used as a fake unit number for driverless devices. */
	none_count = 0;
	/* get the head of the device queue */
	devlist_head = &pci_devq;

	/*
	 * Go through the list of devices and print out devices
	 */
	db_setup_paging(db_simple_pager, &quit, db_lines_per_page);
	for (error = 0, i = 0, quit = 0,
	     dinfo = STAILQ_FIRST(devlist_head);
	     (dinfo != NULL) && (error == 0) && (i < pci_numdevs) && !quit;
	     dinfo = STAILQ_NEXT(dinfo, pci_links), i++) {

		/* Populate pd_name and pd_unit */
		name = NULL;
		if (dinfo->cfg.dev)
			name = device_get_name(dinfo->cfg.dev);

		p = &dinfo->conf;
		db_printf("%s%d@pci%d:%d:%d:\tclass=0x%06x card=0x%08x "
			"chip=0x%08x rev=0x%02x hdr=0x%02x\n",
			(name && *name) ? name : "none",
			(name && *name) ? (int)device_get_unit(dinfo->cfg.dev) :
			none_count++,
			p->pc_sel.pc_bus, p->pc_sel.pc_dev,
			p->pc_sel.pc_func, (p->pc_class << 16) |
			(p->pc_subclass << 8) | p->pc_progif,
			(p->pc_subdevice << 16) | p->pc_subvendor,
			(p->pc_device << 16) | p->pc_vendor,
			p->pc_revid, p->pc_hdr);
	}
}
2758 #endif /* DDB */
2759
/*
 * Lazily allocate a resource for a BAR that has no entry yet in the
 * child's resource list: size the BAR by writing all-ones, allocate a
 * matching resource from the parent, record it in the resource list,
 * and program the BAR with the assigned base address.  Returns NULL
 * on failure (the original BAR value is written back).
 */
static struct resource *
pci_alloc_map(device_t dev, device_t child, int type, int *rid,
    u_long start, u_long end, u_long count, u_int flags)
{
	struct pci_devinfo *dinfo = device_get_ivars(child);
	struct resource_list *rl = &dinfo->resources;
	struct resource_list_entry *rle;
	struct resource *res;
	uint32_t map, testval;
	int mapsize;

	/*
	 * Weed out the bogons, and figure out how large the BAR/map
	 * is.  Bars that read back 0 here are bogus and unimplemented.
	 * Note: atapci in legacy mode are special and handled elsewhere
	 * in the code.  If you have a atapci device in legacy mode and
	 * it fails here, that other code is broken.
	 */
	res = NULL;
	/* Save the BAR, then write all-ones to probe its size. */
	map = pci_read_config(child, *rid, 4);
	pci_write_config(child, *rid, 0xffffffff, 4);
	testval = pci_read_config(child, *rid, 4);
	if (pci_mapbase(testval) == 0)
		goto out;
	/* The requested type must match what the BAR decodes. */
	if (pci_maptype(testval) & PCI_MAPMEM) {
		if (type != SYS_RES_MEMORY) {
			if (bootverbose)
				device_printf(dev,
				    "child %s requested type %d for rid %#x,"
				    " but the BAR says it is an memio\n",
				    device_get_nameunit(child), type, *rid);
			goto out;
		}
	} else {
		if (type != SYS_RES_IOPORT) {
			if (bootverbose)
				device_printf(dev,
				    "child %s requested type %d for rid %#x,"
				    " but the BAR says it is an ioport\n",
				    device_get_nameunit(child), type, *rid);
			goto out;
		}
	}
	/*
	 * For real BARs, we need to override the size that
	 * the driver requests, because that's what the BAR
	 * actually uses and we would otherwise have a
	 * situation where we might allocate the excess to
	 * another driver, which won't work.
	 */
	mapsize = pci_mapsize(testval);
	count = 1 << mapsize;
	if (RF_ALIGNMENT(flags) < mapsize)
		flags = (flags & ~RF_ALIGNMENT_MASK) | RF_ALIGNMENT_LOG2(mapsize);

	/*
	 * Allocate enough resource, and then write back the
	 * appropriate bar for that resource.
	 */
	res = BUS_ALLOC_RESOURCE(device_get_parent(dev), child, type, rid,
	    start, end, count, flags);
	if (res == NULL) {
		device_printf(child,
		    "%#lx bytes of rid %#x res %d failed (%#lx, %#lx).\n",
		    count, *rid, type, start, end);
		goto out;
	}
	resource_list_add(rl, type, *rid, start, end, count);
	rle = resource_list_find(rl, type, *rid);
	if (rle == NULL)
		panic("pci_alloc_map: unexpectedly can't find resource.");
	rle->res = res;
	rle->start = rman_get_start(res);
	rle->end = rman_get_end(res);
	rle->count = count;
	if (bootverbose)
		device_printf(child,
		    "Lazy allocation of %#lx bytes rid %#x type %d at %#lx\n",
		    count, *rid, type, rman_get_start(res));
	/* On success, program the BAR with the allocated base address. */
	map = rman_get_start(res);
out:;
	/* Restore the saved BAR value, or write the new base on success. */
	pci_write_config(child, *rid, map, 4);
	return (res);
}
2844
2845
/*
 * Allocate a resource for a child.  For direct children this performs
 * lazy allocation: legacy interrupts may be routed on demand (and are
 * refused once MSI/MSI-X is in use), BAR-backed memory/port ranges
 * are sized and allocated via pci_alloc_map(), and previously
 * reserved resources are returned (activated if requested).
 */
struct resource *
pci_alloc_resource(device_t dev, device_t child, int type, int *rid,
    u_long start, u_long end, u_long count, u_int flags)
{
	struct pci_devinfo *dinfo = device_get_ivars(child);
	struct resource_list *rl = &dinfo->resources;
	struct resource_list_entry *rle;
	pcicfgregs *cfg = &dinfo->cfg;

	/*
	 * Perform lazy resource allocation
	 */
	if (device_get_parent(child) == dev) {
		switch (type) {
		case SYS_RES_IRQ:
			/*
			 * Can't alloc legacy interrupt once MSI messages
			 * have been allocated.
			 */
			if (*rid == 0 && (cfg->msi.msi_alloc > 0 ||
			    cfg->msix.msix_alloc > 0))
				return (NULL);
			/*
			 * If the child device doesn't have an
			 * interrupt routed and is deserving of an
			 * interrupt, try to assign it one.
			 */
			if (*rid == 0 && !PCI_INTERRUPT_VALID(cfg->intline) &&
			    (cfg->intpin != 0))
				pci_assign_interrupt(dev, child, 0);
			break;
		case SYS_RES_IOPORT:
		case SYS_RES_MEMORY:
			if (*rid < PCIR_BAR(cfg->nummaps)) {
				/*
				 * Enable the I/O mode.  We should
				 * also be assigning resources too
				 * when none are present.  The
				 * resource_list_alloc kind of sorta does
				 * this...
				 */
				if (PCI_ENABLE_IO(dev, child, type))
					return (NULL);
			}
			rle = resource_list_find(rl, type, *rid);
			if (rle == NULL)
				return (pci_alloc_map(dev, child, type, rid,
				    start, end, count, flags));
			break;
		}
		/*
		 * If we've already allocated the resource, then
		 * return it now.  But first we may need to activate
		 * it, since we don't allocate the resource as active
		 * above.  Normally this would be done down in the
		 * nexus, but since we short-circuit that path we have
		 * to do its job here.  Not sure if we should free the
		 * resource if it fails to activate.
		 */
		rle = resource_list_find(rl, type, *rid);
		if (rle != NULL && rle->res != NULL) {
			if (bootverbose)
				device_printf(child,
				    "Reserved %#lx bytes for rid %#x type %d at %#lx\n",
				    rman_get_size(rle->res), *rid, type,
				    rman_get_start(rle->res));
			if ((flags & RF_ACTIVE) &&
			    bus_generic_activate_resource(dev, child, type,
			    *rid, rle->res) != 0)
				return NULL;
			return (rle->res);
		}
	}
	/* Fall back to the normal resource-list allocation path. */
	return (resource_list_alloc(rl, dev, child, type, rid,
	    start, end, count, flags));
}
2922
/*
 * Delete a resource from a direct child's resource list, releasing it
 * first if it was lazily allocated.  Active or child-owned resources
 * are refused with a diagnostic.
 */
void
pci_delete_resource(device_t dev, device_t child, int type, int rid)
{
	struct pci_devinfo *dinfo;
	struct resource_list *rl;
	struct resource_list_entry *rle;

	/* Only direct children keep their resources in our lists. */
	if (device_get_parent(child) != dev)
		return;

	dinfo = device_get_ivars(child);
	rl = &dinfo->resources;
	rle = resource_list_find(rl, type, rid);
	if (rle) {
		if (rle->res) {
			/* Refuse to yank a resource still in use. */
			if (rman_get_device(rle->res) != dev ||
			    rman_get_flags(rle->res) & RF_ACTIVE) {
				device_printf(dev, "delete_resource: "
				    "Resource still owned by child, oops. "
				    "(type=%d, rid=%d, addr=%lx)\n",
				    rle->type, rle->rid,
				    rman_get_start(rle->res));
				return;
			}
			bus_release_resource(dev, type, rid, rle->res);
		}
		resource_list_delete(rl, type, rid);
	}
	/*
	 * Why do we turn off the PCI configuration BAR when we delete a
	 * resource? -- imp
	 */
	pci_write_config(child, rid, 0, 4);
	BUS_DELETE_RESOURCE(device_get_parent(dev), child, type, rid);
}
2958
2959 struct resource_list *
2960 pci_get_resource_list (device_t dev, device_t child)
2961 {
2962 struct pci_devinfo *dinfo = device_get_ivars(child);
2963
2964 return (&dinfo->resources);
2965 }
2966
2967 uint32_t
2968 pci_read_config_method(device_t dev, device_t child, int reg, int width)
2969 {
2970 struct pci_devinfo *dinfo = device_get_ivars(child);
2971 pcicfgregs *cfg = &dinfo->cfg;
2972
2973 return (PCIB_READ_CONFIG(device_get_parent(dev),
2974 cfg->bus, cfg->slot, cfg->func, reg, width));
2975 }
2976
2977 void
2978 pci_write_config_method(device_t dev, device_t child, int reg,
2979 uint32_t val, int width)
2980 {
2981 struct pci_devinfo *dinfo = device_get_ivars(child);
2982 pcicfgregs *cfg = &dinfo->cfg;
2983
2984 PCIB_WRITE_CONFIG(device_get_parent(dev),
2985 cfg->bus, cfg->slot, cfg->func, reg, val, width);
2986 }
2987
/*
 * Format the child's bus location ("slot=S function=F") into buf for
 * devctl/device wiring.  Always returns 0.
 */
int
pci_child_location_str_method(device_t dev, device_t child, char *buf,
    size_t buflen)
{

	snprintf(buf, buflen, "slot=%d function=%d", pci_get_slot(child),
	    pci_get_function(child));
	return (0);
}
2997
/*
 * Format the child's plug-and-play identification (vendor/device/
 * subsystem IDs and combined class code) into buf.  Always returns 0.
 */
int
pci_child_pnpinfo_str_method(device_t dev, device_t child, char *buf,
    size_t buflen)
{
	struct pci_devinfo *dinfo;
	pcicfgregs *cfg;

	dinfo = device_get_ivars(child);
	cfg = &dinfo->cfg;
	snprintf(buf, buflen, "vendor=0x%04x device=0x%04x subvendor=0x%04x "
	    "subdevice=0x%04x class=0x%02x%02x%02x", cfg->vendor, cfg->device,
	    cfg->subvendor, cfg->subdevice, cfg->baseclass, cfg->subclass,
	    cfg->progif);
	return (0);
}
3013
3014 int
3015 pci_assign_interrupt_method(device_t dev, device_t child)
3016 {
3017 struct pci_devinfo *dinfo = device_get_ivars(child);
3018 pcicfgregs *cfg = &dinfo->cfg;
3019
3020 return (PCIB_ROUTE_INTERRUPT(device_get_parent(dev), child,
3021 cfg->intpin));
3022 }
3023
/*
 * Module event handler: on load, initialize the global device queue,
 * create the /dev/pci control device and load the vendor database;
 * on unload, destroy the control device.  Other events are ignored
 * (returns 0).
 */
static int
pci_modevent(module_t mod, int what, void *arg)
{
	static struct cdev *pci_cdev;

	switch (what) {
	case MOD_LOAD:
		STAILQ_INIT(&pci_devq);
		pci_generation = 0;
		pci_cdev = make_dev(&pcicdev, 0, UID_ROOT, GID_WHEEL, 0644,
		    "pci");
		pci_load_vendor_data();
		break;

	case MOD_UNLOAD:
		destroy_dev(pci_cdev);
		break;
	}

	return (0);
}
3045
/*
 * Restore a device's saved configuration space after a power
 * transition: power up to D0 first (a D3->D0 transition resets BARs
 * and other registers), then rewrite the saved header registers and
 * re-establish any MSI/MSI-X state.
 */
void
pci_cfg_restore(device_t dev, struct pci_devinfo *dinfo)
{
	int i;

	/*
	 * Only do header type 0 devices.  Type 1 devices are bridges,
	 * which we know need special treatment.  Type 2 devices are
	 * cardbus bridges which also require special treatment.
	 * Other types are unknown, and we err on the side of safety
	 * by ignoring them.
	 */
	if (dinfo->cfg.hdrtype != 0)
		return;

	/*
	 * Restore the device to full power mode.  We must do this
	 * before we restore the registers because moving from D3 to
	 * D0 will cause the chip's BARs and some other registers to
	 * be reset to some unknown power on reset values.  Cut down
	 * the noise on boot by doing nothing if we are already in
	 * state D0.
	 */
	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
		pci_set_powerstate(dev, PCI_POWERSTATE_D0);
	}
	for (i = 0; i < dinfo->cfg.nummaps; i++)
		pci_write_config(dev, PCIR_BAR(i), dinfo->cfg.bar[i], 4);
	pci_write_config(dev, PCIR_BIOS, dinfo->cfg.bios, 4);
	pci_write_config(dev, PCIR_COMMAND, dinfo->cfg.cmdreg, 2);
	pci_write_config(dev, PCIR_INTLINE, dinfo->cfg.intline, 1);
	pci_write_config(dev, PCIR_INTPIN, dinfo->cfg.intpin, 1);
	pci_write_config(dev, PCIR_MINGNT, dinfo->cfg.mingnt, 1);
	pci_write_config(dev, PCIR_MAXLAT, dinfo->cfg.maxlat, 1);
	pci_write_config(dev, PCIR_CACHELNSZ, dinfo->cfg.cachelnsz, 1);
	pci_write_config(dev, PCIR_LATTIMER, dinfo->cfg.lattimer, 1);
	pci_write_config(dev, PCIR_PROGIF, dinfo->cfg.progif, 1);
	pci_write_config(dev, PCIR_REVID, dinfo->cfg.revid, 1);

	/* Restore MSI and MSI-X configurations if they are present. */
	if (dinfo->cfg.msi.msi_location != 0)
		pci_resume_msi(dev);
	if (dinfo->cfg.msix.msix_location != 0)
		pci_resume_msix(dev);
}
3091
3092 void
3093 pci_cfg_save(device_t dev, struct pci_devinfo *dinfo, int setstate)
3094 {
3095 int i;
3096 uint32_t cls;
3097 int ps;
3098
3099 /*
3100 * Only do header type 0 devices. Type 1 devices are bridges, which
3101 * we know need special treatment. Type 2 devices are cardbus bridges
3102 * which also require special treatment. Other types are unknown, and
3103 * we err on the side of safety by ignoring them. Powering down
3104 * bridges should not be undertaken lightly.
3105 */
3106 if (dinfo->cfg.hdrtype != 0)
3107 return;
3108 for (i = 0; i < dinfo->cfg.nummaps; i++)
3109 dinfo->cfg.bar[i] = pci_read_config(dev, PCIR_BAR(i), 4);
3110 dinfo->cfg.bios = pci_read_config(dev, PCIR_BIOS, 4);
3111
3112 /*
3113 * Some drivers apparently write to these registers w/o updating our
3114 * cached copy. No harm happens if we update the copy, so do so here
3115 * so we can restore them. The COMMAND register is modified by the
3116 * bus w/o updating the cache. This should represent the normally
3117 * writable portion of the 'defined' part of type 0 headers. In
3118 * theory we also need to save/restore the PCI capability structures
3119 * we know about, but apart from power we don't know any that are
3120 * writable.
3121 */
3122 dinfo->cfg.subvendor = pci_read_config(dev, PCIR_SUBVEND_0, 2);
3123 dinfo->cfg.subdevice = pci_read_config(dev, PCIR_SUBDEV_0, 2);
3124 dinfo->cfg.vendor = pci_read_config(dev, PCIR_VENDOR, 2);
3125 dinfo->cfg.device = pci_read_config(dev, PCIR_DEVICE, 2);
3126 dinfo->cfg.cmdreg = pci_read_config(dev, PCIR_COMMAND, 2);
3127 dinfo->cfg.intline = pci_read_config(dev, PCIR_INTLINE, 1);
3128 dinfo->cfg.intpin = pci_read_config(dev, PCIR_INTPIN, 1);
3129 dinfo->cfg.mingnt = pci_read_config(dev, PCIR_MINGNT, 1);
3130 dinfo->cfg.maxlat = pci_read_config(dev, PCIR_MAXLAT, 1);
3131 dinfo->cfg.cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1);
3132 dinfo->cfg.lattimer = pci_read_config(dev, PCIR_LATTIMER, 1);
3133 dinfo->cfg.baseclass = pci_read_config(dev, PCIR_CLASS, 1);
3134 dinfo->cfg.subclass = pci_read_config(dev, PCIR_SUBCLASS, 1);
3135 dinfo->cfg.progif = pci_read_config(dev, PCIR_PROGIF, 1);
3136 dinfo->cfg.revid = pci_read_config(dev, PCIR_REVID, 1);
3137
3138 /*
3139 * don't set the state for display devices, base peripherals and
3140 * memory devices since bad things happen when they are powered down.
3141 * We should (a) have drivers that can easily detach and (b) use
3142 * generic drivers for these devices so that some device actually
3143 * attaches. We need to make sure that when we implement (a) we don't
3144 * power the device down on a reattach.
3145 */
3146 cls = pci_get_class(dev);
3147 if (!setstate)
3148 return;
3149 switch (pci_do_power_nodriver)
3150 {
3151 case 0: /* NO powerdown at all */
3152 return;
3153 case 1: /* Conservative about what to power down */
3154 if (cls == PCIC_STORAGE)
3155 return;
3156 /*FALLTHROUGH*/
3157 case 2: /* Agressive about what to power down */
3158 if (cls == PCIC_DISPLAY || cls == PCIC_MEMORY ||
3159 cls == PCIC_BASEPERIPH)
3160 return;
3161 /*FALLTHROUGH*/
3162 case 3: /* Power down everything */
3163 break;
3164 }
3165 /*
3166 * PCI spec says we can only go into D3 state from D0 state.
3167 * Transition from D[12] into D0 before going to D3 state.
3168 */
3169 ps = pci_get_powerstate(dev);
3170 if (ps != PCI_POWERSTATE_D0 && ps != PCI_POWERSTATE_D3)
3171 pci_set_powerstate(dev, PCI_POWERSTATE_D0);
3172 if (pci_get_powerstate(dev) != PCI_POWERSTATE_D3)
3173 pci_set_powerstate(dev, PCI_POWERSTATE_D3);
3174 }
Cache object: f021aa7d0af57f2bfb4dbc30ec93b335
|