FreeBSD/Linux Kernel Cross Reference
sys/dev/pci/pci.c
1 /*-
2 * Copyright (c) 1997, Stefan Esser <se@freebsd.org>
3 * Copyright (c) 2000, Michael Smith <msmith@freebsd.org>
4 * Copyright (c) 2000, BSDi
5 * All rights reserved.
6 *
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
9 * are met:
10 * 1. Redistributions of source code must retain the above copyright
11 * notice unmodified, this list of conditions, and the following
12 * disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
16 *
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
18 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
19 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
20 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
21 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
22 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
26 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 */
28
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD: releng/6.3/sys/dev/pci/pci.c 171855 2007-08-15 20:56:10Z jhb $");
31
32 #include "opt_bus.h"
33
34 #include <sys/param.h>
35 #include <sys/systm.h>
36 #include <sys/malloc.h>
37 #include <sys/module.h>
38 #include <sys/linker.h>
39 #include <sys/fcntl.h>
40 #include <sys/conf.h>
41 #include <sys/kernel.h>
42 #include <sys/queue.h>
43 #include <sys/sysctl.h>
44 #include <sys/types.h>
45
46 #include <vm/vm.h>
47 #include <vm/pmap.h>
48 #include <vm/vm_extern.h>
49
50 #include <sys/bus.h>
51 #include <machine/bus.h>
52 #include <sys/rman.h>
53 #include <machine/resource.h>
54
55 #if defined(__i386__) || defined(__amd64__)
56 #include <machine/intr_machdep.h>
57 #endif
58
59 #include <sys/pciio.h>
60 #include <dev/pci/pcireg.h>
61 #include <dev/pci/pcivar.h>
62 #include <dev/pci/pci_private.h>
63
64 #include "pcib_if.h"
65 #include "pci_if.h"
66
67 #if (defined(__i386__) && !defined(PC98)) || defined(__amd64__) || \
68 defined (__ia64__)
69 #include <contrib/dev/acpica/acpi.h>
70 #include "acpi_if.h"
71 #else
72 #define ACPI_PWR_FOR_SLEEP(x, y, z)
73 #endif
74
75 static uint32_t pci_mapbase(unsigned mapreg);
76 static int pci_maptype(unsigned mapreg);
77 static int pci_mapsize(unsigned testval);
78 static int pci_maprange(unsigned mapreg);
79 static void pci_fixancient(pcicfgregs *cfg);
80
81 static int pci_porten(device_t pcib, int b, int s, int f);
82 static int pci_memen(device_t pcib, int b, int s, int f);
83 static void pci_assign_interrupt(device_t bus, device_t dev,
84 int force_route);
85 static int pci_add_map(device_t pcib, device_t bus, device_t dev,
86 int b, int s, int f, int reg,
87 struct resource_list *rl, int force, int prefetch);
88 static int pci_probe(device_t dev);
89 static int pci_attach(device_t dev);
90 static void pci_load_vendor_data(void);
91 static int pci_describe_parse_line(char **ptr, int *vendor,
92 int *device, char **desc);
93 static char *pci_describe_device(device_t dev);
94 static int pci_modevent(module_t mod, int what, void *arg);
95 static void pci_hdrtypedata(device_t pcib, int b, int s, int f,
96 pcicfgregs *cfg);
97 static void pci_read_extcap(device_t pcib, pcicfgregs *cfg);
98 static void pci_disable_msi(device_t dev);
99 static void pci_enable_msi(device_t dev, uint64_t address,
100 uint16_t data);
101 static void pci_enable_msix(device_t dev, u_int index,
102 uint64_t address, uint32_t data);
103 static void pci_mask_msix(device_t dev, u_int index);
104 static void pci_unmask_msix(device_t dev, u_int index);
105 static int pci_msi_blacklisted(void);
106 static void pci_resume_msi(device_t dev);
107 static void pci_resume_msix(device_t dev);
108
/*
 * Method table for the PCI bus driver.  The bus methods let child
 * devices allocate resources and interrupts through us; the PCI
 * methods implement pci_if.m (config-space access, power state
 * management, and MSI/MSI-X allocation).
 */
static device_method_t pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,		pci_probe),
	DEVMETHOD(device_attach,	pci_attach),
	DEVMETHOD(device_detach,	bus_generic_detach),
	DEVMETHOD(device_shutdown,	bus_generic_shutdown),
	DEVMETHOD(device_suspend,	pci_suspend),
	DEVMETHOD(device_resume,	pci_resume),

	/* Bus interface */
	DEVMETHOD(bus_print_child,	pci_print_child),
	DEVMETHOD(bus_probe_nomatch,	pci_probe_nomatch),
	DEVMETHOD(bus_read_ivar,	pci_read_ivar),
	DEVMETHOD(bus_write_ivar,	pci_write_ivar),
	DEVMETHOD(bus_driver_added,	pci_driver_added),
	DEVMETHOD(bus_setup_intr,	pci_setup_intr),
	DEVMETHOD(bus_teardown_intr,	pci_teardown_intr),

	DEVMETHOD(bus_get_resource_list,pci_get_resource_list),
	DEVMETHOD(bus_set_resource,	bus_generic_rl_set_resource),
	DEVMETHOD(bus_get_resource,	bus_generic_rl_get_resource),
	DEVMETHOD(bus_delete_resource,	pci_delete_resource),
	DEVMETHOD(bus_alloc_resource,	pci_alloc_resource),
	DEVMETHOD(bus_release_resource,	bus_generic_rl_release_resource),
	DEVMETHOD(bus_activate_resource, bus_generic_activate_resource),
	DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource),
	DEVMETHOD(bus_child_pnpinfo_str, pci_child_pnpinfo_str_method),
	DEVMETHOD(bus_child_location_str, pci_child_location_str_method),

	/* PCI interface */
	DEVMETHOD(pci_read_config,	pci_read_config_method),
	DEVMETHOD(pci_write_config,	pci_write_config_method),
	DEVMETHOD(pci_enable_busmaster,	pci_enable_busmaster_method),
	DEVMETHOD(pci_disable_busmaster, pci_disable_busmaster_method),
	DEVMETHOD(pci_enable_io,	pci_enable_io_method),
	DEVMETHOD(pci_disable_io,	pci_disable_io_method),
	DEVMETHOD(pci_get_powerstate,	pci_get_powerstate_method),
	DEVMETHOD(pci_set_powerstate,	pci_set_powerstate_method),
	DEVMETHOD(pci_assign_interrupt,	pci_assign_interrupt_method),
	DEVMETHOD(pci_find_extcap,	pci_find_extcap_method),
	DEVMETHOD(pci_alloc_msi,	pci_alloc_msi_method),
	DEVMETHOD(pci_alloc_msix,	pci_alloc_msix_method),
	DEVMETHOD(pci_release_msi,	pci_release_msi_method),
	DEVMETHOD(pci_msi_count,	pci_msi_count_method),
	DEVMETHOD(pci_msix_count,	pci_msix_count_method),

	{ 0, 0 }	/* table terminator */
};

DEFINE_CLASS_0(pci, pci_driver, pci_methods, 0);

static devclass_t pci_devclass;
/* Attach an instance of this driver under every pcib (PCI bridge). */
DRIVER_MODULE(pci, pcib, pci_driver, pci_devclass, pci_modevent, 0);
MODULE_VERSION(pci, 1);

/* Vendor description text and its size; see pci_load_vendor_data(). */
static char *pci_vendordata;
static size_t pci_vendordata_size;
166
167
/*
 * Table of devices that need special-case handling, matched on the
 * combined device/vendor ID as read from PCIR_DEVVENDOR.
 */
struct pci_quirk {
	uint32_t devid;	/* Vendor/device of the card */
	int	type;
#define	PCI_QUIRK_MAP_REG	1 /* PCI map register in weird place */
#define	PCI_QUIRK_DISABLE_MSI	2 /* MSI/MSI-X doesn't work */
	int	arg1;		/* quirk-specific argument (e.g. map reg) */
	int	arg2;
};

struct pci_quirk pci_quirks[] = {
	/* The Intel 82371AB and 82443MX has a map register at offset 0x90. */
	{ 0x71138086, PCI_QUIRK_MAP_REG,	0x90,	 0 },
	{ 0x719b8086, PCI_QUIRK_MAP_REG,	0x90,	 0 },
	/* As does the Serverworks OSB4 (the SMBus mapping register) */
	{ 0x02001166, PCI_QUIRK_MAP_REG,	0x90,	 0 },

	/*
	 * MSI doesn't work with the ServerWorks CNB20-HE Host Bridge
	 * or the CMIC-SL (AKA ServerWorks GC_LE).
	 */
	{ 0x00141166, PCI_QUIRK_DISABLE_MSI,	0,	0 },
	{ 0x00171166, PCI_QUIRK_DISABLE_MSI,	0,	0 },

	/*
	 * MSI doesn't work on earlier Intel chipsets including
	 * E7500, E7501, E7505, 845, 865, 875/E7210, and 855.
	 */
	{ 0x25408086, PCI_QUIRK_DISABLE_MSI,	0,	0 },
	{ 0x254c8086, PCI_QUIRK_DISABLE_MSI,	0,	0 },
	{ 0x25508086, PCI_QUIRK_DISABLE_MSI,	0,	0 },
	{ 0x25608086, PCI_QUIRK_DISABLE_MSI,	0,	0 },
	{ 0x25708086, PCI_QUIRK_DISABLE_MSI,	0,	0 },
	{ 0x25788086, PCI_QUIRK_DISABLE_MSI,	0,	0 },
	{ 0x35808086, PCI_QUIRK_DISABLE_MSI,	0,	0 },

	/*
	 * MSI doesn't work with devices behind the AMD 8131 HT-PCIX
	 * bridge.
	 */
	{ 0x74501022, PCI_QUIRK_DISABLE_MSI,	0,	0 },

	{ 0 }	/* terminator */
};
211
/* map register information */
#define	PCI_MAPMEM	0x01	/* memory map */
#define	PCI_MAPMEMP	0x02	/* prefetchable memory map */
#define	PCI_MAPPORT	0x04	/* port map */

/* Global list of all enumerated PCI devices plus its generation count. */
struct devlist pci_devq;
uint32_t pci_generation;
uint32_t pci_numdevs = 0;
/* Set during capability scan when a PCIe root port / PCI-X bridge is seen. */
static int pcie_chipset, pcix_chipset;

/* sysctl vars */
SYSCTL_NODE(_hw, OID_AUTO, pci, CTLFLAG_RD, 0, "PCI bus tuning parameters");

static int pci_enable_io_modes = 1;
TUNABLE_INT("hw.pci.enable_io_modes", &pci_enable_io_modes);
SYSCTL_INT(_hw_pci, OID_AUTO, enable_io_modes, CTLFLAG_RW,
    &pci_enable_io_modes, 1,
    "Enable I/O and memory bits in the config register. Some BIOSes do not\n\
enable these bits correctly. We'd like to do this all the time, but there\n\
are some peripherals that this causes problems with.");

static int pci_do_power_nodriver = 0;
TUNABLE_INT("hw.pci.do_power_nodriver", &pci_do_power_nodriver);
SYSCTL_INT(_hw_pci, OID_AUTO, do_power_nodriver, CTLFLAG_RW,
    &pci_do_power_nodriver, 0,
    "Place a function into D3 state when no driver attaches to it. 0 means\n\
disable. 1 means conservatively place devices into D3 state. 2 means\n\
agressively place devices into D3 state. 3 means put absolutely everything\n\
in D3 state.");

static int pci_do_power_resume = 1;
TUNABLE_INT("hw.pci.do_power_resume", &pci_do_power_resume);
SYSCTL_INT(_hw_pci, OID_AUTO, do_power_resume, CTLFLAG_RW,
    &pci_do_power_resume, 1,
    "Transition from D3 -> D0 on resume.");

/* MSI/MSI-X are opt-in on this branch; both default to disabled. */
static int pci_do_msi = 0;
TUNABLE_INT("hw.pci.enable_msi", &pci_do_msi);
SYSCTL_INT(_hw_pci, OID_AUTO, enable_msi, CTLFLAG_RW, &pci_do_msi, 1,
    "Enable support for MSI interrupts");

static int pci_do_msix = 0;
TUNABLE_INT("hw.pci.enable_msix", &pci_do_msix);
SYSCTL_INT(_hw_pci, OID_AUTO, enable_msix, CTLFLAG_RW, &pci_do_msix, 1,
    "Enable support for MSI-X interrupts");

static int pci_honor_msi_blacklist = 1;
TUNABLE_INT("hw.pci.honor_msi_blacklist", &pci_honor_msi_blacklist);
SYSCTL_INT(_hw_pci, OID_AUTO, honor_msi_blacklist, CTLFLAG_RD,
    &pci_honor_msi_blacklist, 1, "Honor chipset blacklist for MSI");
262
263 /* Find a device_t by bus/slot/function */
264
265 device_t
266 pci_find_bsf(uint8_t bus, uint8_t slot, uint8_t func)
267 {
268 struct pci_devinfo *dinfo;
269
270 STAILQ_FOREACH(dinfo, &pci_devq, pci_links) {
271 if ((dinfo->cfg.bus == bus) &&
272 (dinfo->cfg.slot == slot) &&
273 (dinfo->cfg.func == func)) {
274 return (dinfo->cfg.dev);
275 }
276 }
277
278 return (NULL);
279 }
280
281 /* Find a device_t by vendor/device ID */
282
283 device_t
284 pci_find_device(uint16_t vendor, uint16_t device)
285 {
286 struct pci_devinfo *dinfo;
287
288 STAILQ_FOREACH(dinfo, &pci_devq, pci_links) {
289 if ((dinfo->cfg.vendor == vendor) &&
290 (dinfo->cfg.device == device)) {
291 return (dinfo->cfg.dev);
292 }
293 }
294
295 return (NULL);
296 }
297
298 /* return base address of memory or port map */
299
/*
 * Return the base address encoded in a BAR value.  I/O BARs (bit 0 set)
 * reserve the low 2 bits for flags; memory BARs reserve the low 4 bits.
 */
static uint32_t
pci_mapbase(unsigned mapreg)
{
	return (mapreg & ((mapreg & 0x01) ? ~0x03u : ~0x0fu));
}
308
309 /* return map type of memory or port map */
310
311 static int
312 pci_maptype(unsigned mapreg)
313 {
314 static uint8_t maptype[0x10] = {
315 PCI_MAPMEM, PCI_MAPPORT,
316 PCI_MAPMEM, 0,
317 PCI_MAPMEM, PCI_MAPPORT,
318 0, 0,
319 PCI_MAPMEM|PCI_MAPMEMP, PCI_MAPPORT,
320 PCI_MAPMEM|PCI_MAPMEMP, 0,
321 PCI_MAPMEM|PCI_MAPMEMP, PCI_MAPPORT,
322 0, 0,
323 };
324
325 return maptype[mapreg & 0x0f];
326 }
327
328 /* return log2 of map size decoded for memory or port map */
329
/*
 * Return log2 of the size decoded for a memory or port map: count the
 * trailing zero bits of the base-address portion of the probed value.
 */
static int
pci_mapsize(unsigned testval)
{
	unsigned base;
	int ln2size;

	base = pci_mapbase(testval);
	for (ln2size = 0; base != 0 && (base & 1) == 0; base >>= 1)
		ln2size++;
	return (ln2size);
}
346
347 /* return log2 of address range supported by map register */
348
/*
 * Return log2 of the address range addressable by a map register:
 * 32 for normal 32-bit BARs, 20 for legacy "below 1MB" memory BARs,
 * 64 for 64-bit memory BARs, and 0 for reserved encodings.
 */
static int
pci_maprange(unsigned mapreg)
{
	switch (mapreg & 0x07) {
	case 0x02:
		return (20);
	case 0x04:
		return (64);
	case 0x00:
	case 0x01:
	case 0x05:
		return (32);
	default:
		return (0);
	}
}
368
369 /* adjust some values from PCI 1.0 devices to match 2.0 standards ... */
370
371 static void
372 pci_fixancient(pcicfgregs *cfg)
373 {
374 if (cfg->hdrtype != 0)
375 return;
376
377 /* PCI to PCI bridges use header type 1 */
378 if (cfg->baseclass == PCIC_BRIDGE && cfg->subclass == PCIS_BRIDGE_PCI)
379 cfg->hdrtype = 1;
380 }
381
382 /* extract header type specific config data */
383
384 static void
385 pci_hdrtypedata(device_t pcib, int b, int s, int f, pcicfgregs *cfg)
386 {
387 #define REG(n, w) PCIB_READ_CONFIG(pcib, b, s, f, n, w)
388 switch (cfg->hdrtype) {
389 case 0:
390 cfg->subvendor = REG(PCIR_SUBVEND_0, 2);
391 cfg->subdevice = REG(PCIR_SUBDEV_0, 2);
392 cfg->nummaps = PCI_MAXMAPS_0;
393 break;
394 case 1:
395 cfg->nummaps = PCI_MAXMAPS_1;
396 break;
397 case 2:
398 cfg->subvendor = REG(PCIR_SUBVEND_2, 2);
399 cfg->subdevice = REG(PCIR_SUBDEV_2, 2);
400 cfg->nummaps = PCI_MAXMAPS_2;
401 break;
402 }
403 #undef REG
404 }
405
406 /* read configuration header into pcicfgregs structure */
407
/*
 * Read the configuration header of the function at b:s:f into a newly
 * allocated, zeroed pci_devinfo of 'size' bytes ('size' lets callers
 * embed the devinfo at the start of a larger structure).  The entry is
 * appended to the global device list.  Returns NULL if no device
 * responds at that address or allocation fails.
 */
struct pci_devinfo *
pci_read_device(device_t pcib, int b, int s, int f, size_t size)
{
#define	REG(n, w)	PCIB_READ_CONFIG(pcib, b, s, f, n, w)
	pcicfgregs *cfg = NULL;
	struct pci_devinfo *devlist_entry;
	struct devlist *devlist_head;

	devlist_head = &pci_devq;

	devlist_entry = NULL;

	/* Reading all-ones from DEVVENDOR means nothing decodes here. */
	if (REG(PCIR_DEVVENDOR, 4) != -1) {
		devlist_entry = malloc(size, M_DEVBUF, M_WAITOK | M_ZERO);
		if (devlist_entry == NULL)
			return (NULL);

		cfg = &devlist_entry->cfg;

		cfg->bus		= b;
		cfg->slot		= s;
		cfg->func		= f;
		cfg->vendor		= REG(PCIR_VENDOR, 2);
		cfg->device		= REG(PCIR_DEVICE, 2);
		cfg->cmdreg		= REG(PCIR_COMMAND, 2);
		cfg->statreg		= REG(PCIR_STATUS, 2);
		cfg->baseclass		= REG(PCIR_CLASS, 1);
		cfg->subclass		= REG(PCIR_SUBCLASS, 1);
		cfg->progif		= REG(PCIR_PROGIF, 1);
		cfg->revid		= REG(PCIR_REVID, 1);
		cfg->hdrtype		= REG(PCIR_HDRTYPE, 1);
		cfg->cachelnsz		= REG(PCIR_CACHELNSZ, 1);
		cfg->lattimer		= REG(PCIR_LATTIMER, 1);
		cfg->intpin		= REG(PCIR_INTPIN, 1);
		cfg->intline		= REG(PCIR_INTLINE, 1);

		cfg->mingnt		= REG(PCIR_MINGNT, 1);
		cfg->maxlat		= REG(PCIR_MAXLAT, 1);

		/* Split the multifunction flag out of the header type. */
		cfg->mfdev		= (cfg->hdrtype & PCIM_MFDEV) != 0;
		cfg->hdrtype		&= ~PCIM_MFDEV;

		pci_fixancient(cfg);
		pci_hdrtypedata(pcib, b, s, f, cfg);

		/* Scan the capability list if the device advertises one. */
		if (REG(PCIR_STATUS, 2) & PCIM_STATUS_CAPPRESENT)
			pci_read_extcap(pcib, cfg);

		STAILQ_INSERT_TAIL(devlist_head, devlist_entry, pci_links);

		/* Mirror selected fields into the pciio conf structure. */
		devlist_entry->conf.pc_sel.pc_bus = cfg->bus;
		devlist_entry->conf.pc_sel.pc_dev = cfg->slot;
		devlist_entry->conf.pc_sel.pc_func = cfg->func;
		devlist_entry->conf.pc_hdr = cfg->hdrtype;

		devlist_entry->conf.pc_subvendor = cfg->subvendor;
		devlist_entry->conf.pc_subdevice = cfg->subdevice;
		devlist_entry->conf.pc_vendor = cfg->vendor;
		devlist_entry->conf.pc_device = cfg->device;

		devlist_entry->conf.pc_class = cfg->baseclass;
		devlist_entry->conf.pc_subclass = cfg->subclass;
		devlist_entry->conf.pc_progif = cfg->progif;
		devlist_entry->conf.pc_revid = cfg->revid;

		pci_numdevs++;
		pci_generation++;
	}
	return (devlist_entry);
#undef REG
}
479
480 static void
481 pci_read_extcap(device_t pcib, pcicfgregs *cfg)
482 {
483 #define REG(n, w) PCIB_READ_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, w)
484 #define WREG(n, v, w) PCIB_WRITE_CONFIG(pcib, cfg->bus, cfg->slot, cfg->func, n, v, w)
485 #if defined(__i386__) || defined(__amd64__)
486 uint64_t addr;
487 #endif
488 uint32_t val;
489 int ptr, nextptr, ptrptr;
490
491 switch (cfg->hdrtype & PCIM_HDRTYPE) {
492 case 0:
493 case 1:
494 ptrptr = PCIR_CAP_PTR;
495 break;
496 case 2:
497 ptrptr = PCIR_CAP_PTR_2; /* cardbus capabilities ptr */
498 break;
499 default:
500 return; /* no extended capabilities support */
501 }
502 nextptr = REG(ptrptr, 1); /* sanity check? */
503
504 /*
505 * Read capability entries.
506 */
507 while (nextptr != 0) {
508 /* Sanity check */
509 if (nextptr > 255) {
510 printf("illegal PCI extended capability offset %d\n",
511 nextptr);
512 return;
513 }
514 /* Find the next entry */
515 ptr = nextptr;
516 nextptr = REG(ptr + PCICAP_NEXTPTR, 1);
517
518 /* Process this entry */
519 switch (REG(ptr + PCICAP_ID, 1)) {
520 case PCIY_PMG: /* PCI power management */
521 if (cfg->pp.pp_cap == 0) {
522 cfg->pp.pp_cap = REG(ptr + PCIR_POWER_CAP, 2);
523 cfg->pp.pp_status = ptr + PCIR_POWER_STATUS;
524 cfg->pp.pp_pmcsr = ptr + PCIR_POWER_PMCSR;
525 if ((nextptr - ptr) > PCIR_POWER_DATA)
526 cfg->pp.pp_data = ptr + PCIR_POWER_DATA;
527 }
528 break;
529 #if defined(__i386__) || defined(__amd64__)
530 case PCIY_HT: /* HyperTransport */
531 /* Determine HT-specific capability type. */
532 val = REG(ptr + PCIR_HT_COMMAND, 2);
533 switch (val & PCIM_HTCMD_CAP_MASK) {
534 case PCIM_HTCAP_MSI_MAPPING:
535 if (!(val & PCIM_HTCMD_MSI_FIXED)) {
536 /* Sanity check the mapping window. */
537 addr = REG(ptr + PCIR_HTMSI_ADDRESS_HI,
538 4);
539 addr <<= 32;
540 addr = REG(ptr + PCIR_HTMSI_ADDRESS_LO,
541 4);
542 if (addr != MSI_INTEL_ADDR_BASE)
543 device_printf(pcib,
544 "HT Bridge at %d:%d:%d has non-default MSI window 0x%llx\n",
545 cfg->bus, cfg->slot,
546 cfg->func, (long long)addr);
547 }
548
549 /* Enable MSI -> HT mapping. */
550 val |= PCIM_HTCMD_MSI_ENABLE;
551 WREG(ptr + PCIR_HT_COMMAND, val, 2);
552 break;
553 }
554 break;
555 #endif
556 case PCIY_MSI: /* PCI MSI */
557 cfg->msi.msi_location = ptr;
558 cfg->msi.msi_ctrl = REG(ptr + PCIR_MSI_CTRL, 2);
559 cfg->msi.msi_msgnum = 1 << ((cfg->msi.msi_ctrl &
560 PCIM_MSICTRL_MMC_MASK)>>1);
561 break;
562 case PCIY_MSIX: /* PCI MSI-X */
563 cfg->msix.msix_location = ptr;
564 cfg->msix.msix_ctrl = REG(ptr + PCIR_MSIX_CTRL, 2);
565 cfg->msix.msix_msgnum = (cfg->msix.msix_ctrl &
566 PCIM_MSIXCTRL_TABLE_SIZE) + 1;
567 val = REG(ptr + PCIR_MSIX_TABLE, 4);
568 cfg->msix.msix_table_bar = PCIR_BAR(val &
569 PCIM_MSIX_BIR_MASK);
570 cfg->msix.msix_table_offset = val & ~PCIM_MSIX_BIR_MASK;
571 val = REG(ptr + PCIR_MSIX_PBA, 4);
572 cfg->msix.msix_pba_bar = PCIR_BAR(val &
573 PCIM_MSIX_BIR_MASK);
574 cfg->msix.msix_pba_offset = val & ~PCIM_MSIX_BIR_MASK;
575 break;
576 case PCIY_SUBVENDOR:
577 /* Should always be true. */
578 if ((cfg->hdrtype & PCIM_HDRTYPE) == 1) {
579 val = REG(ptr + PCIR_SUBVENDCAP_ID, 4);
580 cfg->subvendor = val & 0xffff;
581 cfg->subdevice = val >> 16;
582 }
583 break;
584 case PCIY_PCIX: /* PCI-X */
585 /*
586 * Assume we have a PCI-X chipset if we have
587 * at least one PCI-PCI bridge with a PCI-X
588 * capability. Note that some systems with
589 * PCI-express or HT chipsets might match on
590 * this check as well.
591 */
592 if ((cfg->hdrtype & PCIM_HDRTYPE) == 1)
593 pcix_chipset = 1;
594 break;
595 case PCIY_EXPRESS: /* PCI-express */
596 /*
597 * Assume we have a PCI-express chipset if we have
598 * at least one PCI-express root port.
599 */
600 val = REG(ptr + PCIR_EXPRESS_FLAGS, 2);
601 if ((val & PCIM_EXP_FLAGS_TYPE) ==
602 PCIM_EXP_TYPE_ROOT_PORT)
603 pcie_chipset = 1;
604 break;
605 default:
606 break;
607 }
608 }
609 #undef REG
610 #undef WREG
611 }
612
613 /*
614 * Return the offset in configuration space of the requested extended
615 * capability entry or 0 if the specified capability was not found.
616 */
617 int
618 pci_find_extcap_method(device_t dev, device_t child, int capability,
619 int *capreg)
620 {
621 struct pci_devinfo *dinfo = device_get_ivars(child);
622 pcicfgregs *cfg = &dinfo->cfg;
623 u_int32_t status;
624 u_int8_t ptr;
625
626 /*
627 * Check the CAP_LIST bit of the PCI status register first.
628 */
629 status = pci_read_config(child, PCIR_STATUS, 2);
630 if (!(status & PCIM_STATUS_CAPPRESENT))
631 return (ENXIO);
632
633 /*
634 * Determine the start pointer of the capabilities list.
635 */
636 switch (cfg->hdrtype & PCIM_HDRTYPE) {
637 case 0:
638 case 1:
639 ptr = PCIR_CAP_PTR;
640 break;
641 case 2:
642 ptr = PCIR_CAP_PTR_2;
643 break;
644 default:
645 /* XXX: panic? */
646 return (ENXIO); /* no extended capabilities support */
647 }
648 ptr = pci_read_config(child, ptr, 1);
649
650 /*
651 * Traverse the capabilities list.
652 */
653 while (ptr != 0) {
654 if (pci_read_config(child, ptr + PCICAP_ID, 1) == capability) {
655 if (capreg != NULL)
656 *capreg = ptr;
657 return (0);
658 }
659 ptr = pci_read_config(child, ptr + PCICAP_NEXTPTR, 1);
660 }
661
662 return (ENOENT);
663 }
664
665 /*
666 * Support for MSI-X message interrupts.
667 */
668 void
669 pci_enable_msix(device_t dev, u_int index, uint64_t address, uint32_t data)
670 {
671 struct pci_devinfo *dinfo = device_get_ivars(dev);
672 struct pcicfg_msix *msix = &dinfo->cfg.msix;
673 uint32_t offset;
674
675 KASSERT(msix->msix_table_len > index, ("bogus index"));
676 offset = msix->msix_table_offset + index * 16;
677 bus_write_4(msix->msix_table_res, offset, address & 0xffffffff);
678 bus_write_4(msix->msix_table_res, offset + 4, address >> 32);
679 bus_write_4(msix->msix_table_res, offset + 8, data);
680 }
681
682 void
683 pci_mask_msix(device_t dev, u_int index)
684 {
685 struct pci_devinfo *dinfo = device_get_ivars(dev);
686 struct pcicfg_msix *msix = &dinfo->cfg.msix;
687 uint32_t offset, val;
688
689 KASSERT(msix->msix_msgnum > index, ("bogus index"));
690 offset = msix->msix_table_offset + index * 16 + 12;
691 val = bus_read_4(msix->msix_table_res, offset);
692 if (!(val & PCIM_MSIX_VCTRL_MASK)) {
693 val |= PCIM_MSIX_VCTRL_MASK;
694 bus_write_4(msix->msix_table_res, offset, val);
695 }
696 }
697
698 void
699 pci_unmask_msix(device_t dev, u_int index)
700 {
701 struct pci_devinfo *dinfo = device_get_ivars(dev);
702 struct pcicfg_msix *msix = &dinfo->cfg.msix;
703 uint32_t offset, val;
704
705 KASSERT(msix->msix_table_len > index, ("bogus index"));
706 offset = msix->msix_table_offset + index * 16 + 12;
707 val = bus_read_4(msix->msix_table_res, offset);
708 if (val & PCIM_MSIX_VCTRL_MASK) {
709 val &= ~PCIM_MSIX_VCTRL_MASK;
710 bus_write_4(msix->msix_table_res, offset, val);
711 }
712 }
713
714 int
715 pci_pending_msix(device_t dev, u_int index)
716 {
717 struct pci_devinfo *dinfo = device_get_ivars(dev);
718 struct pcicfg_msix *msix = &dinfo->cfg.msix;
719 uint32_t offset, bit;
720
721 KASSERT(msix->msix_table_len > index, ("bogus index"));
722 offset = msix->msix_pba_offset + (index / 32) * 4;
723 bit = 1 << index % 32;
724 return (bus_read_4(msix->msix_pba_res, offset) & bit);
725 }
726
/*
 * Restore MSI-X registers and table during resume.  If MSI-X is
 * enabled then walk the virtual table to restore the actual MSI-X
 * table.
 */
static void
pci_resume_msix(device_t dev)
{
	struct pci_devinfo *dinfo = device_get_ivars(dev);
	struct pcicfg_msix *msix = &dinfo->cfg.msix;
	struct msix_table_entry *mte;
	struct msix_vector *mv;
	int i;

	if (msix->msix_alloc > 0) {
		/* First, mask all vectors. */
		for (i = 0; i < msix->msix_msgnum; i++)
			pci_mask_msix(dev, i);

		/* Second, program any messages with at least one handler. */
		for (i = 0; i < msix->msix_table_len; i++) {
			mte = &msix->msix_table[i];
			if (mte->mte_vector == 0 || mte->mte_handlers == 0)
				continue;
			/* mte_vector is a 1-based index into msix_vectors. */
			mv = &msix->msix_vectors[mte->mte_vector - 1];
			pci_enable_msix(dev, i, mv->mv_address, mv->mv_data);
			pci_unmask_msix(dev, i);
		}
	}
	/* Finally, restore the saved control register (enable state). */
	pci_write_config(dev, msix->msix_location + PCIR_MSIX_CTRL,
	    msix->msix_ctrl, 2);
}
759
760 /*
761 * Attempt to allocate *count MSI-X messages. The actual number allocated is
762 * returned in *count. After this function returns, each message will be
763 * available to the driver as SYS_RES_IRQ resources starting at rid 1.
764 */
765 int
766 pci_alloc_msix_method(device_t dev, device_t child, int *count)
767 {
768 struct pci_devinfo *dinfo = device_get_ivars(child);
769 pcicfgregs *cfg = &dinfo->cfg;
770 struct resource_list_entry *rle;
771 int actual, error, i, irq, max;
772
773 /* Don't let count == 0 get us into trouble. */
774 if (*count == 0)
775 return (EINVAL);
776
777 /* If rid 0 is allocated, then fail. */
778 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 0);
779 if (rle != NULL && rle->res != NULL)
780 return (ENXIO);
781
782 /* Already have allocated messages? */
783 if (cfg->msi.msi_alloc != 0 || cfg->msix.msix_alloc != 0)
784 return (ENXIO);
785
786 /* If MSI is blacklisted for this system, fail. */
787 if (pci_msi_blacklisted())
788 return (ENXIO);
789
790 /* MSI-X capability present? */
791 if (cfg->msix.msix_location == 0 || !pci_do_msix)
792 return (ENODEV);
793
794 /* Make sure the appropriate BARs are mapped. */
795 rle = resource_list_find(&dinfo->resources, SYS_RES_MEMORY,
796 cfg->msix.msix_table_bar);
797 if (rle == NULL || rle->res == NULL ||
798 !(rman_get_flags(rle->res) & RF_ACTIVE))
799 return (ENXIO);
800 cfg->msix.msix_table_res = rle->res;
801 if (cfg->msix.msix_pba_bar != cfg->msix.msix_table_bar) {
802 rle = resource_list_find(&dinfo->resources, SYS_RES_MEMORY,
803 cfg->msix.msix_pba_bar);
804 if (rle == NULL || rle->res == NULL ||
805 !(rman_get_flags(rle->res) & RF_ACTIVE))
806 return (ENXIO);
807 }
808 cfg->msix.msix_pba_res = rle->res;
809
810 if (bootverbose)
811 device_printf(child,
812 "attempting to allocate %d MSI-X vectors (%d supported)\n",
813 *count, cfg->msix.msix_msgnum);
814 max = min(*count, cfg->msix.msix_msgnum);
815 for (i = 0; i < max; i++) {
816 /* Allocate a message. */
817 error = PCIB_ALLOC_MSIX(device_get_parent(dev), child, &irq);
818 if (error)
819 break;
820 resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1, irq,
821 irq, 1);
822 }
823 actual = i;
824
825 if (bootverbose) {
826 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 1);
827 if (actual == 1)
828 device_printf(child, "using IRQ %lu for MSI-X\n",
829 rle->start);
830 else {
831 int run;
832
833 /*
834 * Be fancy and try to print contiguous runs of
835 * IRQ values as ranges. 'irq' is the previous IRQ.
836 * 'run' is true if we are in a range.
837 */
838 device_printf(child, "using IRQs %lu", rle->start);
839 irq = rle->start;
840 run = 0;
841 for (i = 1; i < actual; i++) {
842 rle = resource_list_find(&dinfo->resources,
843 SYS_RES_IRQ, i + 1);
844
845 /* Still in a run? */
846 if (rle->start == irq + 1) {
847 run = 1;
848 irq++;
849 continue;
850 }
851
852 /* Finish previous range. */
853 if (run) {
854 printf("-%d", irq);
855 run = 0;
856 }
857
858 /* Start new range. */
859 printf(",%lu", rle->start);
860 irq = rle->start;
861 }
862
863 /* Unfinished range? */
864 if (run)
865 printf("-%d", irq);
866 printf(" for MSI-X\n");
867 }
868 }
869
870 /* Mask all vectors. */
871 for (i = 0; i < cfg->msix.msix_msgnum; i++)
872 pci_mask_msix(child, i);
873
874 /* Allocate and initialize vector data and virtual table. */
875 cfg->msix.msix_vectors = malloc(sizeof(struct msix_vector) * actual,
876 M_DEVBUF, M_WAITOK | M_ZERO);
877 cfg->msix.msix_table = malloc(sizeof(struct msix_table_entry) * actual,
878 M_DEVBUF, M_WAITOK | M_ZERO);
879 for (i = 0; i < actual; i++) {
880 rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
881 cfg->msix.msix_vectors[i].mv_irq = rle->start;
882 cfg->msix.msix_table[i].mte_vector = i + 1;
883 }
884
885 /* Update control register to enable MSI-X. */
886 cfg->msix.msix_ctrl |= PCIM_MSIXCTRL_MSIX_ENABLE;
887 pci_write_config(child, cfg->msix.msix_location + PCIR_MSIX_CTRL,
888 cfg->msix.msix_ctrl, 2);
889
890 /* Update counts of alloc'd messages. */
891 cfg->msix.msix_alloc = actual;
892 cfg->msix.msix_table_len = actual;
893 *count = actual;
894 return (0);
895 }
896
/*
 * Release all MSI-X messages allocated for 'child': disable MSI-X in
 * the control register, delete the SYS_RES_IRQ entries, and hand the
 * IRQs back to the parent bridge.  Fails with ENODEV if nothing is
 * allocated, or EBUSY if any message still has a handler attached or
 * its IRQ resource is still held.
 */
static int
pci_release_msix(device_t dev, device_t child)
{
	struct pci_devinfo *dinfo = device_get_ivars(child);
	struct pcicfg_msix *msix = &dinfo->cfg.msix;
	struct resource_list_entry *rle;
	int i;

	/* Do we have any messages to release? */
	if (msix->msix_alloc == 0)
		return (ENODEV);

	/* Make sure none of the resources are allocated. */
	for (i = 0; i < msix->msix_table_len; i++) {
		if (msix->msix_table[i].mte_vector == 0)
			continue;
		if (msix->msix_table[i].mte_handlers > 0)
			return (EBUSY);
		rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
		KASSERT(rle != NULL, ("missing resource"));
		if (rle->res != NULL)
			return (EBUSY);
	}

	/* Update control register to disable MSI-X. */
	msix->msix_ctrl &= ~PCIM_MSIXCTRL_MSIX_ENABLE;
	pci_write_config(child, msix->msix_location + PCIR_MSIX_CTRL,
	    msix->msix_ctrl, 2);

	/* Free the resource list entries. */
	for (i = 0; i < msix->msix_table_len; i++) {
		if (msix->msix_table[i].mte_vector == 0)
			continue;
		resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1);
	}
	free(msix->msix_table, M_DEVBUF);
	msix->msix_table_len = 0;

	/* Release the IRQs. */
	for (i = 0; i < msix->msix_alloc; i++)
		PCIB_RELEASE_MSIX(device_get_parent(dev), child,
		    msix->msix_vectors[i].mv_irq);
	free(msix->msix_vectors, M_DEVBUF);
	msix->msix_alloc = 0;
	return (0);
}
943
944 /*
945 * Return the max supported MSI-X messages this device supports.
946 * Basically, assuming the MD code can alloc messages, this function
947 * should return the maximum value that pci_alloc_msix() can return.
948 * Thus, it is subject to the tunables, etc.
949 */
950 int
951 pci_msix_count_method(device_t dev, device_t child)
952 {
953 struct pci_devinfo *dinfo = device_get_ivars(child);
954 struct pcicfg_msix *msix = &dinfo->cfg.msix;
955
956 if (pci_do_msix && msix->msix_location != 0)
957 return (msix->msix_msgnum);
958 return (0);
959 }
960
961 /*
962 * Support for MSI message signalled interrupts.
963 */
964 void
965 pci_enable_msi(device_t dev, uint64_t address, uint16_t data)
966 {
967 struct pci_devinfo *dinfo = device_get_ivars(dev);
968 struct pcicfg_msi *msi = &dinfo->cfg.msi;
969
970 /* Write data and address values. */
971 pci_write_config(dev, msi->msi_location + PCIR_MSI_ADDR,
972 address & 0xffffffff, 4);
973 if (msi->msi_ctrl & PCIM_MSICTRL_64BIT) {
974 pci_write_config(dev, msi->msi_location + PCIR_MSI_ADDR_HIGH,
975 address >> 32, 4);
976 pci_write_config(dev, msi->msi_location + PCIR_MSI_DATA_64BIT,
977 data, 2);
978 } else
979 pci_write_config(dev, msi->msi_location + PCIR_MSI_DATA, data,
980 2);
981
982 /* Enable MSI in the control register. */
983 msi->msi_ctrl |= PCIM_MSICTRL_MSI_ENABLE;
984 pci_write_config(dev, msi->msi_location + PCIR_MSI_CTRL, msi->msi_ctrl,
985 2);
986 }
987
988 void
989 pci_disable_msi(device_t dev)
990 {
991 struct pci_devinfo *dinfo = device_get_ivars(dev);
992 struct pcicfg_msi *msi = &dinfo->cfg.msi;
993
994 /* Disable MSI in the control register. */
995 msi->msi_ctrl &= ~PCIM_MSICTRL_MSI_ENABLE;
996 pci_write_config(dev, msi->msi_location + PCIR_MSI_CTRL, msi->msi_ctrl,
997 2);
998 }
999
/*
 * Restore MSI registers during resume.  If MSI is enabled then
 * restore the data and address registers in addition to the control
 * register.
 */
static void
pci_resume_msi(device_t dev)
{
	struct pci_devinfo *dinfo = device_get_ivars(dev);
	struct pcicfg_msi *msi = &dinfo->cfg.msi;
	uint64_t address;
	uint16_t data;

	if (msi->msi_ctrl & PCIM_MSICTRL_MSI_ENABLE) {
		/* Replay the saved message address/data into the device. */
		address = msi->msi_addr;
		data = msi->msi_data;
		pci_write_config(dev, msi->msi_location + PCIR_MSI_ADDR,
		    address & 0xffffffff, 4);
		if (msi->msi_ctrl & PCIM_MSICTRL_64BIT) {
			/* 64-bit capable devices have an extra address
			 * register, shifting the data register up. */
			pci_write_config(dev, msi->msi_location +
			    PCIR_MSI_ADDR_HIGH, address >> 32, 4);
			pci_write_config(dev, msi->msi_location +
			    PCIR_MSI_DATA_64BIT, data, 2);
		} else
			pci_write_config(dev, msi->msi_location + PCIR_MSI_DATA,
			    data, 2);
	}
	/* Always restore the saved control register (enable state). */
	pci_write_config(dev, msi->msi_location + PCIR_MSI_CTRL, msi->msi_ctrl,
	    2);
}
1030
/*
 * Re-route an already-allocated MSI or MSI-X IRQ for this device.  The
 * parent bridge is asked for a fresh address/data pair for 'irq' and
 * the device's message registers are reprogrammed with the result.
 * Returns 0 on success for the MSI case, ENOENT if 'irq' does not
 * belong to this device, or the error from PCIB_MAP_MSI().
 */
int
pci_remap_msi_irq(device_t dev, u_int irq)
{
	struct pci_devinfo *dinfo = device_get_ivars(dev);
	pcicfgregs *cfg = &dinfo->cfg;
	struct resource_list_entry *rle;
	struct msix_table_entry *mte;
	struct msix_vector *mv;
	device_t bus;
	uint64_t addr;
	uint32_t data;
	int error, i, j;

	bus = device_get_parent(dev);

	/*
	 * Handle MSI first.  We try to find this IRQ among our list
	 * of MSI IRQs.  If we find it, we request updated address and
	 * data registers and apply the results.
	 */
	if (cfg->msi.msi_alloc > 0) {

		/* If we don't have any active handlers, nothing to do. */
		if (cfg->msi.msi_handlers == 0)
			return (0);
		/* MSI messages live at rids 1 .. msi_alloc. */
		for (i = 0; i < cfg->msi.msi_alloc; i++) {
			rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ,
			    i + 1);
			if (rle->start == irq) {
				error = PCIB_MAP_MSI(device_get_parent(bus),
				    dev, irq, &addr, &data);
				if (error)
					return (error);
				/* Disable MSI while rewriting addr/data. */
				pci_disable_msi(dev);
				dinfo->cfg.msi.msi_addr = addr;
				dinfo->cfg.msi.msi_data = data;
				pci_enable_msi(dev, addr, data);
				return (0);
			}
		}
		return (ENOENT);
	}

	/*
	 * For MSI-X, we check to see if we have this IRQ.  If we do,
	 * we request the updated mapping info.  If that works, we go
	 * through all the slots that use this IRQ and update them.
	 */
	if (cfg->msix.msix_alloc > 0) {
		for (i = 0; i < cfg->msix.msix_alloc; i++) {
			mv = &cfg->msix.msix_vectors[i];
			if (mv->mv_irq == irq) {
				error = PCIB_MAP_MSI(device_get_parent(bus),
				    dev, irq, &addr, &data);
				if (error)
					return (error);
				mv->mv_address = addr;
				mv->mv_data = data;
				/*
				 * Rewrite every table slot that uses this
				 * vector; masking around the update avoids
				 * a half-written entry firing.
				 */
				for (j = 0; j < cfg->msix.msix_table_len; j++) {
					mte = &cfg->msix.msix_table[j];
					if (mte->mte_vector != i + 1)
						continue;
					if (mte->mte_handlers == 0)
						continue;
					pci_mask_msix(dev, j);
					pci_enable_msix(dev, j, addr, data);
					pci_unmask_msix(dev, j);
				}
			}
		}
		/*
		 * NOTE(review): this returns ENOENT even when a matching
		 * MSI-X vector was found and reprogrammed above -- confirm
		 * callers ignore the return value in the MSI-X case.
		 */
		return (ENOENT);
	}

	return (ENOENT);
}
1106
1107 /*
1108 * Returns true if the specified device is blacklisted because MSI
1109 * doesn't work.
1110 */
1111 int
1112 pci_msi_device_blacklisted(device_t dev)
1113 {
1114 struct pci_quirk *q;
1115
1116 if (!pci_honor_msi_blacklist)
1117 return (0);
1118
1119 for (q = &pci_quirks[0]; q->devid; q++) {
1120 if (q->devid == pci_get_devid(dev) &&
1121 q->type == PCI_QUIRK_DISABLE_MSI)
1122 return (1);
1123 }
1124 return (0);
1125 }
1126
/*
 * Determine if MSI is blacklisted globally on this system.  Currently,
 * we just check for blacklisted chipsets as represented by the
 * host-PCI bridge at device 0:0:0.  In the future, it may become
 * necessary to check other system attributes, such as the kenv values
 * that give the motherboard manufacturer and model number.
 */
1134 static int
1135 pci_msi_blacklisted(void)
1136 {
1137 device_t dev;
1138
1139 if (!pci_honor_msi_blacklist)
1140 return (0);
1141
1142 /* Blacklist all non-PCI-express and non-PCI-X chipsets. */
1143 if (!(pcie_chipset || pcix_chipset))
1144 return (1);
1145
1146 dev = pci_find_bsf(0, 0, 0);
1147 if (dev != NULL)
1148 return (pci_msi_device_blacklisted(dev));
1149 return (0);
1150 }
1151
/*
 * Attempt to allocate *count MSI messages.  The actual number allocated is
 * returned in *count.  After this function returns, each message will be
 * available to the driver as SYS_RES_IRQ resources starting at rid 1.
 */
int
pci_alloc_msi_method(device_t dev, device_t child, int *count)
{
	struct pci_devinfo *dinfo = device_get_ivars(child);
	pcicfgregs *cfg = &dinfo->cfg;
	struct resource_list_entry *rle;
	int actual, error, i, irqs[32];	/* 32 = hard MSI message limit */
	uint16_t ctrl;

	/* Don't let count == 0 get us into trouble. */
	if (*count == 0)
		return (EINVAL);

	/* If rid 0 is allocated, then fail. */
	rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, 0);
	if (rle != NULL && rle->res != NULL)
		return (ENXIO);

	/* Already have allocated messages? */
	if (cfg->msi.msi_alloc != 0 || cfg->msix.msix_alloc != 0)
		return (ENXIO);

	/* If MSI is blacklisted for this system, fail. */
	if (pci_msi_blacklisted())
		return (ENXIO);

	/* MSI capability present? */
	if (cfg->msi.msi_location == 0 || !pci_do_msi)
		return (ENODEV);

	if (bootverbose)
		device_printf(child,
		    "attempting to allocate %d MSI vectors (%d supported)\n",
		    *count, cfg->msi.msi_msgnum);

	/* Don't ask for more than the device supports. */
	actual = min(*count, cfg->msi.msi_msgnum);

	/* Don't ask for more than 32 messages. */
	actual = min(actual, 32);

	/* MSI requires power of 2 number of messages. */
	if (!powerof2(actual))
		return (EINVAL);

	/*
	 * Ask the parent bridge for 'actual' messages, halving the
	 * request on failure until a single message is refused.
	 */
	for (;;) {
		/* Try to allocate N messages. */
		error = PCIB_ALLOC_MSI(device_get_parent(dev), child, actual,
		    cfg->msi.msi_msgnum, irqs);
		if (error == 0)
			break;
		if (actual == 1)
			return (error);

		/* Try N / 2. */
		actual >>= 1;
	}

	/*
	 * We now have N actual messages mapped onto SYS_RES_IRQ
	 * resources in the irqs[] array, so add new resources
	 * starting at rid 1.
	 */
	for (i = 0; i < actual; i++)
		resource_list_add(&dinfo->resources, SYS_RES_IRQ, i + 1,
		    irqs[i], irqs[i], 1);

	if (bootverbose) {
		if (actual == 1)
			device_printf(child, "using IRQ %d for MSI\n", irqs[0]);
		else {
			int run;

			/*
			 * Be fancy and try to print contiguous runs
			 * of IRQ values as ranges.  'run' is true if
			 * we are in a range.
			 */
			device_printf(child, "using IRQs %d", irqs[0]);
			run = 0;
			for (i = 1; i < actual; i++) {

				/* Still in a run? */
				if (irqs[i] == irqs[i - 1] + 1) {
					run = 1;
					continue;
				}

				/* Finish previous range. */
				if (run) {
					printf("-%d", irqs[i - 1]);
					run = 0;
				}

				/* Start new range. */
				printf(",%d", irqs[i]);
			}

			/* Unfinished range? */
			if (run)
				printf("-%d", irqs[actual - 1]);
			printf(" for MSI\n");
		}
	}

	/*
	 * Update control register with actual count: log2(actual) goes
	 * into the Multiple Message Enable field (bits 6:4).
	 */
	ctrl = cfg->msi.msi_ctrl;
	ctrl &= ~PCIM_MSICTRL_MME_MASK;
	ctrl |= (ffs(actual) - 1) << 4;
	cfg->msi.msi_ctrl = ctrl;
	pci_write_config(child, cfg->msi.msi_location + PCIR_MSI_CTRL, ctrl, 2);

	/* Update counts of alloc'd messages. */
	cfg->msi.msi_alloc = actual;
	cfg->msi.msi_handlers = 0;
	*count = actual;
	return (0);
}
1275
1276 /* Release the MSI messages associated with this device. */
int
pci_release_msi_method(device_t dev, device_t child)
{
	struct pci_devinfo *dinfo = device_get_ivars(child);
	struct pcicfg_msi *msi = &dinfo->cfg.msi;
	struct resource_list_entry *rle;
	int error, i, irqs[32];

	/*
	 * Try MSI-X first; pci_release_msix() returns ENODEV only when
	 * no MSI-X messages are allocated, in which case we fall through
	 * to the plain-MSI path below.
	 */
	error = pci_release_msix(dev, child);
	if (error != ENODEV)
		return (error);

	/* Do we have any messages to release? */
	if (msi->msi_alloc == 0)
		return (ENODEV);
	KASSERT(msi->msi_alloc <= 32, ("more than 32 alloc'd messages"));

	/* Make sure none of the resources are allocated. */
	if (msi->msi_handlers > 0)
		return (EBUSY);
	for (i = 0; i < msi->msi_alloc; i++) {
		rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, i + 1);
		KASSERT(rle != NULL, ("missing MSI resource"));
		if (rle->res != NULL)
			return (EBUSY);
		/* Collect the IRQs so the bridge can reclaim them below. */
		irqs[i] = rle->start;
	}

	/* Update control register with 0 count. */
	KASSERT(!(msi->msi_ctrl & PCIM_MSICTRL_MSI_ENABLE),
	    ("%s: MSI still enabled", __func__));
	msi->msi_ctrl &= ~PCIM_MSICTRL_MME_MASK;
	pci_write_config(child, msi->msi_location + PCIR_MSI_CTRL,
	    msi->msi_ctrl, 2);

	/* Release the messages. */
	PCIB_RELEASE_MSI(device_get_parent(dev), child, msi->msi_alloc, irqs);
	for (i = 0; i < msi->msi_alloc; i++)
		resource_list_delete(&dinfo->resources, SYS_RES_IRQ, i + 1);

	/* Update alloc count. */
	msi->msi_alloc = 0;
	msi->msi_addr = 0;
	msi->msi_data = 0;
	return (0);
}
1324
1325 /*
1326 * Return the max supported MSI messages this device supports.
1327 * Basically, assuming the MD code can alloc messages, this function
1328 * should return the maximum value that pci_alloc_msi() can return.
1329 * Thus, it is subject to the tunables, etc.
1330 */
1331 int
1332 pci_msi_count_method(device_t dev, device_t child)
1333 {
1334 struct pci_devinfo *dinfo = device_get_ivars(child);
1335 struct pcicfg_msi *msi = &dinfo->cfg.msi;
1336
1337 if (pci_do_msi && msi->msi_location != 0)
1338 return (msi->msi_msgnum);
1339 return (0);
1340 }
1341
1342 /* free pcicfgregs structure and all depending data structures */
1343
1344 int
1345 pci_freecfg(struct pci_devinfo *dinfo)
1346 {
1347 struct devlist *devlist_head;
1348
1349 devlist_head = &pci_devq;
1350
1351 STAILQ_REMOVE(devlist_head, dinfo, pci_devinfo, pci_links);
1352 free(dinfo, M_DEVBUF);
1353
1354 /* increment the generation count */
1355 pci_generation++;
1356
1357 /* we're losing one device */
1358 pci_numdevs--;
1359 return (0);
1360 }
1361
/*
 * PCI power management
 */
1365 int
1366 pci_set_powerstate_method(device_t dev, device_t child, int state)
1367 {
1368 struct pci_devinfo *dinfo = device_get_ivars(child);
1369 pcicfgregs *cfg = &dinfo->cfg;
1370 uint16_t status;
1371 int result, oldstate, highest, delay;
1372
1373 if (cfg->pp.pp_cap == 0)
1374 return (EOPNOTSUPP);
1375
1376 /*
1377 * Optimize a no state change request away. While it would be OK to
1378 * write to the hardware in theory, some devices have shown odd
1379 * behavior when going from D3 -> D3.
1380 */
1381 oldstate = pci_get_powerstate(child);
1382 if (oldstate == state)
1383 return (0);
1384
1385 /*
1386 * The PCI power management specification states that after a state
1387 * transition between PCI power states, system software must
1388 * guarantee a minimal delay before the function accesses the device.
1389 * Compute the worst case delay that we need to guarantee before we
1390 * access the device. Many devices will be responsive much more
1391 * quickly than this delay, but there are some that don't respond
1392 * instantly to state changes. Transitions to/from D3 state require
1393 * 10ms, while D2 requires 200us, and D0/1 require none. The delay
1394 * is done below with DELAY rather than a sleeper function because
1395 * this function can be called from contexts where we cannot sleep.
1396 */
1397 highest = (oldstate > state) ? oldstate : state;
1398 if (highest == PCI_POWERSTATE_D3)
1399 delay = 10000;
1400 else if (highest == PCI_POWERSTATE_D2)
1401 delay = 200;
1402 else
1403 delay = 0;
1404 status = PCI_READ_CONFIG(dev, child, cfg->pp.pp_status, 2)
1405 & ~PCIM_PSTAT_DMASK;
1406 result = 0;
1407 switch (state) {
1408 case PCI_POWERSTATE_D0:
1409 status |= PCIM_PSTAT_D0;
1410 break;
1411 case PCI_POWERSTATE_D1:
1412 if ((cfg->pp.pp_cap & PCIM_PCAP_D1SUPP) == 0)
1413 return (EOPNOTSUPP);
1414 status |= PCIM_PSTAT_D1;
1415 break;
1416 case PCI_POWERSTATE_D2:
1417 if ((cfg->pp.pp_cap & PCIM_PCAP_D2SUPP) == 0)
1418 return (EOPNOTSUPP);
1419 status |= PCIM_PSTAT_D2;
1420 break;
1421 case PCI_POWERSTATE_D3:
1422 status |= PCIM_PSTAT_D3;
1423 break;
1424 default:
1425 return (EINVAL);
1426 }
1427
1428 if (bootverbose)
1429 printf(
1430 "pci%d:%d:%d: Transition from D%d to D%d\n",
1431 dinfo->cfg.bus, dinfo->cfg.slot, dinfo->cfg.func,
1432 oldstate, state);
1433
1434 PCI_WRITE_CONFIG(dev, child, cfg->pp.pp_status, status, 2);
1435 if (delay)
1436 DELAY(delay);
1437 return (0);
1438 }
1439
1440 int
1441 pci_get_powerstate_method(device_t dev, device_t child)
1442 {
1443 struct pci_devinfo *dinfo = device_get_ivars(child);
1444 pcicfgregs *cfg = &dinfo->cfg;
1445 uint16_t status;
1446 int result;
1447
1448 if (cfg->pp.pp_cap != 0) {
1449 status = PCI_READ_CONFIG(dev, child, cfg->pp.pp_status, 2);
1450 switch (status & PCIM_PSTAT_DMASK) {
1451 case PCIM_PSTAT_D0:
1452 result = PCI_POWERSTATE_D0;
1453 break;
1454 case PCIM_PSTAT_D1:
1455 result = PCI_POWERSTATE_D1;
1456 break;
1457 case PCIM_PSTAT_D2:
1458 result = PCI_POWERSTATE_D2;
1459 break;
1460 case PCIM_PSTAT_D3:
1461 result = PCI_POWERSTATE_D3;
1462 break;
1463 default:
1464 result = PCI_POWERSTATE_UNKNOWN;
1465 break;
1466 }
1467 } else {
1468 /* No support, device is always at D0 */
1469 result = PCI_POWERSTATE_D0;
1470 }
1471 return (result);
1472 }
1473
1474 /*
1475 * Some convenience functions for PCI device drivers.
1476 */
1477
1478 static __inline void
1479 pci_set_command_bit(device_t dev, device_t child, uint16_t bit)
1480 {
1481 uint16_t command;
1482
1483 command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2);
1484 command |= bit;
1485 PCI_WRITE_CONFIG(dev, child, PCIR_COMMAND, command, 2);
1486 }
1487
1488 static __inline void
1489 pci_clear_command_bit(device_t dev, device_t child, uint16_t bit)
1490 {
1491 uint16_t command;
1492
1493 command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2);
1494 command &= ~bit;
1495 PCI_WRITE_CONFIG(dev, child, PCIR_COMMAND, command, 2);
1496 }
1497
/* Enable DMA bus mastering for the child device.  Always succeeds. */
int
pci_enable_busmaster_method(device_t dev, device_t child)
{
	pci_set_command_bit(dev, child, PCIM_CMD_BUSMASTEREN);
	return (0);
}
1504
/* Disable DMA bus mastering for the child device.  Always succeeds. */
int
pci_disable_busmaster_method(device_t dev, device_t child)
{
	pci_clear_command_bit(dev, child, PCIM_CMD_BUSMASTEREN);
	return (0);
}
1511
1512 int
1513 pci_enable_io_method(device_t dev, device_t child, int space)
1514 {
1515 uint16_t command;
1516 uint16_t bit;
1517 char *error;
1518
1519 bit = 0;
1520 error = NULL;
1521
1522 switch(space) {
1523 case SYS_RES_IOPORT:
1524 bit = PCIM_CMD_PORTEN;
1525 error = "port";
1526 break;
1527 case SYS_RES_MEMORY:
1528 bit = PCIM_CMD_MEMEN;
1529 error = "memory";
1530 break;
1531 default:
1532 return (EINVAL);
1533 }
1534 pci_set_command_bit(dev, child, bit);
1535 /* Some devices seem to need a brief stall here, what do to? */
1536 command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2);
1537 if (command & bit)
1538 return (0);
1539 device_printf(child, "failed to enable %s mapping!\n", error);
1540 return (ENXIO);
1541 }
1542
1543 int
1544 pci_disable_io_method(device_t dev, device_t child, int space)
1545 {
1546 uint16_t command;
1547 uint16_t bit;
1548 char *error;
1549
1550 bit = 0;
1551 error = NULL;
1552
1553 switch(space) {
1554 case SYS_RES_IOPORT:
1555 bit = PCIM_CMD_PORTEN;
1556 error = "port";
1557 break;
1558 case SYS_RES_MEMORY:
1559 bit = PCIM_CMD_MEMEN;
1560 error = "memory";
1561 break;
1562 default:
1563 return (EINVAL);
1564 }
1565 pci_clear_command_bit(dev, child, bit);
1566 command = PCI_READ_CONFIG(dev, child, PCIR_COMMAND, 2);
1567 if (command & bit) {
1568 device_printf(child, "failed to disable %s mapping!\n", error);
1569 return (ENXIO);
1570 }
1571 return (0);
1572 }
1573
1574 /*
1575 * New style pci driver. Parent device is either a pci-host-bridge or a
1576 * pci-pci-bridge. Both kinds are represented by instances of pcib.
1577 */
1578
/*
 * Dump a device's identification, header fields, interrupt routing,
 * and power/MSI/MSI-X capability summary to the console.  Only emits
 * output under bootverbose.
 */
void
pci_print_verbose(struct pci_devinfo *dinfo)
{
	if (bootverbose) {
		pcicfgregs *cfg = &dinfo->cfg;

		printf("found->\tvendor=0x%04x, dev=0x%04x, revid=0x%02x\n",
		    cfg->vendor, cfg->device, cfg->revid);
		printf("\tbus=%d, slot=%d, func=%d\n",
		    cfg->bus, cfg->slot, cfg->func);
		printf("\tclass=%02x-%02x-%02x, hdrtype=0x%02x, mfdev=%d\n",
		    cfg->baseclass, cfg->subclass, cfg->progif, cfg->hdrtype,
		    cfg->mfdev);
		printf("\tcmdreg=0x%04x, statreg=0x%04x, cachelnsz=%d (dwords)\n",
		    cfg->cmdreg, cfg->statreg, cfg->cachelnsz);
		/* lattimer/mingnt/maxlat are in units of PCI clocks. */
		printf("\tlattimer=0x%02x (%d ns), mingnt=0x%02x (%d ns), maxlat=0x%02x (%d ns)\n",
		    cfg->lattimer, cfg->lattimer * 30, cfg->mingnt,
		    cfg->mingnt * 250, cfg->maxlat, cfg->maxlat * 250);
		if (cfg->intpin > 0)
			printf("\tintpin=%c, irq=%d\n",
			    cfg->intpin +'a' -1, cfg->intline);
		if (cfg->pp.pp_cap) {
			uint16_t status;

			/* Power management capability summary. */
			status = pci_read_config(cfg->dev, cfg->pp.pp_status, 2);
			printf("\tpowerspec %d supports D0%s%s D3 current D%d\n",
			    cfg->pp.pp_cap & PCIM_PCAP_SPEC,
			    cfg->pp.pp_cap & PCIM_PCAP_D1SUPP ? " D1" : "",
			    cfg->pp.pp_cap & PCIM_PCAP_D2SUPP ? " D2" : "",
			    status & PCIM_PSTAT_DMASK);
		}
		if (cfg->msi.msi_location) {
			int ctrl;

			/* MSI capability summary. */
			ctrl = cfg->msi.msi_ctrl;
			printf("\tMSI supports %d message%s%s%s\n",
			    cfg->msi.msi_msgnum,
			    (cfg->msi.msi_msgnum == 1) ? "" : "s",
			    (ctrl & PCIM_MSICTRL_64BIT) ? ", 64 bit" : "",
			    (ctrl & PCIM_MSICTRL_VECTOR) ? ", vector masks":"");
		}
		if (cfg->msix.msix_location) {
			/* MSI-X capability summary (table/PBA BARs). */
			printf("\tMSI-X supports %d message%s ",
			    cfg->msix.msix_msgnum,
			    (cfg->msix.msix_msgnum == 1) ? "" : "s");
			if (cfg->msix.msix_table_bar == cfg->msix.msix_pba_bar)
				printf("in map 0x%x\n",
				    cfg->msix.msix_table_bar);
			else
				printf("in maps 0x%x and 0x%x\n",
				    cfg->msix.msix_table_bar,
				    cfg->msix.msix_pba_bar);
		}
	}
}
1634
1635 static int
1636 pci_porten(device_t pcib, int b, int s, int f)
1637 {
1638 return (PCIB_READ_CONFIG(pcib, b, s, f, PCIR_COMMAND, 2)
1639 & PCIM_CMD_PORTEN) != 0;
1640 }
1641
1642 static int
1643 pci_memen(device_t pcib, int b, int s, int f)
1644 {
1645 return (PCIB_READ_CONFIG(pcib, b, s, f, PCIR_COMMAND, 2)
1646 & PCIM_CMD_MEMEN) != 0;
1647 }
1648
1649 /*
1650 * Add a resource based on a pci map register. Return 1 if the map
1651 * register is a 32bit map register or 2 if it is a 64bit register.
1652 */
1653 static int
1654 pci_add_map(device_t pcib, device_t bus, device_t dev,
1655 int b, int s, int f, int reg, struct resource_list *rl, int force,
1656 int prefetch)
1657 {
1658 uint32_t map;
1659 uint64_t base;
1660 uint64_t start, end, count;
1661 uint8_t ln2size;
1662 uint8_t ln2range;
1663 uint32_t testval;
1664 uint16_t cmd;
1665 int type;
1666 int barlen;
1667 struct resource *res;
1668
1669 map = PCIB_READ_CONFIG(pcib, b, s, f, reg, 4);
1670 PCIB_WRITE_CONFIG(pcib, b, s, f, reg, 0xffffffff, 4);
1671 testval = PCIB_READ_CONFIG(pcib, b, s, f, reg, 4);
1672 PCIB_WRITE_CONFIG(pcib, b, s, f, reg, map, 4);
1673
1674 if (pci_maptype(map) & PCI_MAPMEM)
1675 type = SYS_RES_MEMORY;
1676 else
1677 type = SYS_RES_IOPORT;
1678 ln2size = pci_mapsize(testval);
1679 ln2range = pci_maprange(testval);
1680 base = pci_mapbase(map);
1681 barlen = ln2range == 64 ? 2 : 1;
1682
1683 /*
1684 * For I/O registers, if bottom bit is set, and the next bit up
1685 * isn't clear, we know we have a BAR that doesn't conform to the
1686 * spec, so ignore it. Also, sanity check the size of the data
1687 * areas to the type of memory involved. Memory must be at least
1688 * 16 bytes in size, while I/O ranges must be at least 4.
1689 */
1690 if ((testval & 0x1) == 0x1 &&
1691 (testval & 0x2) != 0)
1692 return (barlen);
1693 if ((type == SYS_RES_MEMORY && ln2size < 4) ||
1694 (type == SYS_RES_IOPORT && ln2size < 2))
1695 return (barlen);
1696
1697 if (ln2range == 64)
1698 /* Read the other half of a 64bit map register */
1699 base |= (uint64_t) PCIB_READ_CONFIG(pcib, b, s, f, reg + 4, 4) << 32;
1700
1701 if (bootverbose) {
1702 printf("\tmap[%02x]: type %x, range %2d, base %08x, size %2d",
1703 reg, pci_maptype(map), ln2range,
1704 (unsigned int) base, ln2size);
1705 if (type == SYS_RES_IOPORT && !pci_porten(pcib, b, s, f))
1706 printf(", port disabled\n");
1707 else if (type == SYS_RES_MEMORY && !pci_memen(pcib, b, s, f))
1708 printf(", memory disabled\n");
1709 else
1710 printf(", enabled\n");
1711 }
1712
1713 /*
1714 * If base is 0, then we have problems. It is best to ignore
1715 * such entries for the moment. These will be allocated later if
1716 * the driver specifically requests them. However, some
1717 * removable busses look better when all resources are allocated,
1718 * so allow '' to be overriden.
1719 *
1720 * Similarly treat maps whose values is the same as the test value
1721 * read back. These maps have had all f's written to them by the
1722 * BIOS in an attempt to disable the resources.
1723 */
1724 if (!force && (base == 0 || map == testval))
1725 return (barlen);
1726
1727 /*
1728 * This code theoretically does the right thing, but has
1729 * undesirable side effects in some cases where peripherals
1730 * respond oddly to having these bits enabled. Let the user
1731 * be able to turn them off (since pci_enable_io_modes is 1 by
1732 * default).
1733 */
1734 if (pci_enable_io_modes) {
1735 /* Turn on resources that have been left off by a lazy BIOS */
1736 if (type == SYS_RES_IOPORT && !pci_porten(pcib, b, s, f)) {
1737 cmd = PCIB_READ_CONFIG(pcib, b, s, f, PCIR_COMMAND, 2);
1738 cmd |= PCIM_CMD_PORTEN;
1739 PCIB_WRITE_CONFIG(pcib, b, s, f, PCIR_COMMAND, cmd, 2);
1740 }
1741 if (type == SYS_RES_MEMORY && !pci_memen(pcib, b, s, f)) {
1742 cmd = PCIB_READ_CONFIG(pcib, b, s, f, PCIR_COMMAND, 2);
1743 cmd |= PCIM_CMD_MEMEN;
1744 PCIB_WRITE_CONFIG(pcib, b, s, f, PCIR_COMMAND, cmd, 2);
1745 }
1746 } else {
1747 if (type == SYS_RES_IOPORT && !pci_porten(pcib, b, s, f))
1748 return (barlen);
1749 if (type == SYS_RES_MEMORY && !pci_memen(pcib, b, s, f))
1750 return (barlen);
1751 }
1752
1753 count = 1 << ln2size;
1754 if (base == 0 || base == pci_mapbase(testval)) {
1755 start = 0; /* Let the parent deside */
1756 end = ~0ULL;
1757 } else {
1758 start = base;
1759 end = base + (1 << ln2size) - 1;
1760 }
1761 resource_list_add(rl, type, reg, start, end, count);
1762
1763 /*
1764 * Not quite sure what to do on failure of allocating the resource
1765 * since I can postulate several right answers.
1766 */
1767 res = resource_list_alloc(rl, bus, dev, type, ®, start, end, count,
1768 prefetch ? RF_PREFETCHABLE : 0);
1769 if (res != NULL)
1770 pci_write_config(dev, reg, rman_get_start(res), 4);
1771 return (barlen);
1772 }
1773
/*
 * For ATA devices we need to decide early what addressing mode to use.
 * Legacy demands that the primary and secondary ATA ports sit on the
 * same addresses that old ISA hardware did.  This dictates that we use
 * those addresses and ignore the BARs if we cannot set PCI native
 * addressing mode.
 */
static void
pci_ata_maps(device_t pcib, device_t bus, device_t dev, int b,
    int s, int f, struct resource_list *rl, int force, uint32_t prefetchmask)
{
	int rid, type, progif;
#if 0
	/* if this device supports PCI native addressing use it */
	progif = pci_read_config(dev, PCIR_PROGIF, 1);
	if ((progif & 0x8a) == 0x8a) {
		if (pci_mapbase(pci_read_config(dev, PCIR_BAR(0), 4)) &&
		    pci_mapbase(pci_read_config(dev, PCIR_BAR(2), 4))) {
			printf("Trying ATA native PCI addressing mode\n");
			pci_write_config(dev, PCIR_PROGIF, progif | 0x05, 1);
		}
	}
#endif
	/*
	 * The prog-if register tells us, per channel, whether the
	 * controller is in native-PCI mode (use the BARs) or legacy
	 * compatibility mode (use the fixed ISA addresses).
	 */
	progif = pci_read_config(dev, PCIR_PROGIF, 1);
	type = SYS_RES_IOPORT;
	if (progif & PCIP_STORAGE_IDE_MODEPRIM) {
		/* Primary channel in native mode: BARs 0 and 1. */
		pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(0), rl, force,
		    prefetchmask & (1 << 0));
		pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(1), rl, force,
		    prefetchmask & (1 << 1));
	} else {
		/* Legacy primary channel: 0x1f0-0x1f7 and 0x3f6. */
		rid = PCIR_BAR(0);
		resource_list_add(rl, type, rid, 0x1f0, 0x1f7, 8);
		resource_list_alloc(rl, bus, dev, type, &rid, 0x1f0, 0x1f7, 8,
		    0);
		rid = PCIR_BAR(1);
		resource_list_add(rl, type, rid, 0x3f6, 0x3f6, 1);
		resource_list_alloc(rl, bus, dev, type, &rid, 0x3f6, 0x3f6, 1,
		    0);
	}
	if (progif & PCIP_STORAGE_IDE_MODESEC) {
		/* Secondary channel in native mode: BARs 2 and 3. */
		pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(2), rl, force,
		    prefetchmask & (1 << 2));
		pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(3), rl, force,
		    prefetchmask & (1 << 3));
	} else {
		/* Legacy secondary channel: 0x170-0x177 and 0x376. */
		rid = PCIR_BAR(2);
		resource_list_add(rl, type, rid, 0x170, 0x177, 8);
		resource_list_alloc(rl, bus, dev, type, &rid, 0x170, 0x177, 8,
		    0);
		rid = PCIR_BAR(3);
		resource_list_add(rl, type, rid, 0x376, 0x376, 1);
		resource_list_alloc(rl, bus, dev, type, &rid, 0x376, 0x376, 1,
		    0);
	}
	/* BARs 4 (bus-master DMA) and 5 are always mapped normally. */
	pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(4), rl, force,
	    prefetchmask & (1 << 4));
	pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(5), rl, force,
	    prefetchmask & (1 << 5));
}
1834
/*
 * Determine the legacy INTx IRQ for a device and record it as the rid
 * 0 SYS_RES_IRQ resource.  The IRQ comes, in priority order, from a
 * user tunable (hw.pciB.S.INTP.irq), the bus's interrupt routing, or
 * the device's intline register.  If force_route is set, routing is
 * attempted even when intline already looks valid.
 */
static void
pci_assign_interrupt(device_t bus, device_t dev, int force_route)
{
	struct pci_devinfo *dinfo = device_get_ivars(dev);
	pcicfgregs *cfg = &dinfo->cfg;
	char tunable_name[64];
	int irq;

	/* Has to have an intpin to have an interrupt. */
	if (cfg->intpin == 0)
		return;

	/* Let the user override the IRQ with a tunable. */
	irq = PCI_INVALID_IRQ;
	snprintf(tunable_name, sizeof(tunable_name), "hw.pci%d.%d.INT%c.irq",
	    cfg->bus, cfg->slot, cfg->intpin + 'A' - 1);
	/* Reject out-of-range tunable values (valid IRQs are 1..254). */
	if (TUNABLE_INT_FETCH(tunable_name, &irq) && (irq >= 255 || irq <= 0))
		irq = PCI_INVALID_IRQ;

	/*
	 * If we didn't get an IRQ via the tunable, then we either use the
	 * IRQ value in the intline register or we ask the bus to route an
	 * interrupt for us.  If force_route is true, then we only use the
	 * value in the intline register if the bus was unable to assign an
	 * IRQ.
	 */
	if (!PCI_INTERRUPT_VALID(irq)) {
		if (!PCI_INTERRUPT_VALID(cfg->intline) || force_route)
			irq = PCI_ASSIGN_INTERRUPT(bus, dev);
		if (!PCI_INTERRUPT_VALID(irq))
			irq = cfg->intline;
	}

	/* If after all that we don't have an IRQ, just bail. */
	if (!PCI_INTERRUPT_VALID(irq))
		return;

	/* Update the config register if it changed. */
	if (irq != cfg->intline) {
		cfg->intline = irq;
		pci_write_config(dev, PCIR_INTLINE, irq, 1);
	}

	/* Add this IRQ as rid 0 interrupt resource. */
	resource_list_add(&dinfo->resources, SYS_RES_IRQ, 0, irq, irq, 1);
}
1881
/*
 * Populate a child device's resource list from its BARs, quirk table
 * entries, and interrupt routing.  'force' and 'prefetchmask' are
 * passed through to pci_add_map() for each BAR.
 */
void
pci_add_resources(device_t bus, device_t dev, int force, uint32_t prefetchmask)
{
	device_t pcib;
	struct pci_devinfo *dinfo = device_get_ivars(dev);
	pcicfgregs *cfg = &dinfo->cfg;
	struct resource_list *rl = &dinfo->resources;
	struct pci_quirk *q;
	int b, i, f, s;

	pcib = device_get_parent(bus);

	b = cfg->bus;
	s = cfg->slot;
	f = cfg->func;

	/* ATA devices needs special map treatment */
	if ((pci_get_class(dev) == PCIC_STORAGE) &&
	    (pci_get_subclass(dev) == PCIS_STORAGE_IDE) &&
	    (pci_get_progif(dev) & PCIP_STORAGE_IDE_MASTERDEV))
		pci_ata_maps(pcib, bus, dev, b, s, f, rl, force, prefetchmask);
	else
		/* pci_add_map() returns 1 or 2 BAR slots consumed. */
		for (i = 0; i < cfg->nummaps;)
			i += pci_add_map(pcib, bus, dev, b, s, f, PCIR_BAR(i),
			    rl, force, prefetchmask & (1 << i));

	/*
	 * Add additional, quirked resources.
	 */
	for (q = &pci_quirks[0]; q->devid; q++) {
		if (q->devid == ((cfg->device << 16) | cfg->vendor)
		    && q->type == PCI_QUIRK_MAP_REG)
			pci_add_map(pcib, bus, dev, b, s, f, q->arg1, rl,
			    force, 0);
	}

	if (cfg->intpin > 0 && PCI_INTERRUPT_VALID(cfg->intline)) {
#if defined(__ia64__) || defined(__i386__) || defined(__amd64__) || \
    defined(__arm__) || defined(__alpha__)
		/*
		 * Try to re-route interrupts.  Sometimes the BIOS or
		 * firmware may leave bogus values in these registers.
		 * If the re-route fails, then just stick with what we
		 * have.
		 */
		pci_assign_interrupt(bus, dev, 1);
#else
		pci_assign_interrupt(bus, dev, 0);
#endif
	}
}
1933
/*
 * Enumerate all slots and functions on the given bus number and add a
 * child device for each function that responds to config reads.
 * 'dinfo_size' lets subclassed busses allocate a larger devinfo.
 */
void
pci_add_children(device_t dev, int busno, size_t dinfo_size)
{
#define REG(n, w)	PCIB_READ_CONFIG(pcib, busno, s, f, n, w)
	device_t pcib = device_get_parent(dev);
	struct pci_devinfo *dinfo;
	int maxslots;
	int s, f, pcifunchigh;
	uint8_t hdrtype;

	KASSERT(dinfo_size >= sizeof(struct pci_devinfo),
	    ("dinfo_size too small"));
	maxslots = PCIB_MAXSLOTS(pcib);
	for (s = 0; s <= maxslots; s++) {
		pcifunchigh = 0;
		f = 0;
		DELAY(1);
		hdrtype = REG(PCIR_HDRTYPE, 1);
		/* Skip slots with an unrecognized header type. */
		if ((hdrtype & PCIM_HDRTYPE) > PCI_MAXHDRTYPE)
			continue;
		/* Only scan functions 1-7 on multi-function devices. */
		if (hdrtype & PCIM_MFDEV)
			pcifunchigh = PCI_FUNCMAX;
		for (f = 0; f <= pcifunchigh; f++) {
			dinfo = pci_read_device(pcib, busno, s, f, dinfo_size);
			if (dinfo != NULL) {
				pci_add_child(dev, dinfo);
			}
		}
	}
#undef REG
}
1965
/*
 * Attach a devinfo as a new child of the bus: create the device_t,
 * save and restore its config state, and add its resources.  The
 * save/restore pair records the boot-time configuration before
 * resource allocation touches it.
 */
void
pci_add_child(device_t bus, struct pci_devinfo *dinfo)
{
	dinfo->cfg.dev = device_add_child(bus, NULL, -1);
	device_set_ivars(dinfo->cfg.dev, dinfo);
	resource_list_init(&dinfo->resources);
	pci_cfg_save(dinfo->cfg.dev, dinfo, 0);
	pci_cfg_restore(dinfo->cfg.dev, dinfo);
	pci_print_verbose(dinfo);
	pci_add_resources(bus, dinfo->cfg.dev, 0, 0);
}
1977
static int
pci_probe(device_t dev)
{

	device_set_desc(dev, "PCI bus");

	/* Allow other subclasses to override this driver. */
	return (-1000);
}
1987
static int
pci_attach(device_t dev)
{
	int busno;

	/*
	 * Since there can be multiple independently numbered PCI
	 * busses on some large alpha systems, we can't use the unit
	 * number to decide what bus we are probing.  We ask the parent
	 * pcib what our bus number is.
	 */
	busno = pcib_get_bus(dev);
	if (bootverbose)
		device_printf(dev, "physical bus=%d\n", busno);

	/* Enumerate children and let the generic bus attach them. */
	pci_add_children(dev, busno, sizeof(struct pci_devinfo));

	return (bus_generic_attach(dev));
}
2007
/*
 * Bus suspend method: save each child's config space, suspend the
 * children, and then (when ACPI is available and power management on
 * resume is enabled) place attached type-0 children into a low power
 * state, defaulting to D3 unless ACPI suggests otherwise.
 */
int
pci_suspend(device_t dev)
{
	int dstate, error, i, numdevs;
	device_t acpi_dev, child, *devlist;
	struct pci_devinfo *dinfo;

	/*
	 * Save the PCI configuration space for each child and set the
	 * device in the appropriate power state for this sleep state.
	 */
	acpi_dev = NULL;
	if (pci_do_power_resume)
		acpi_dev = devclass_get_device(devclass_find("acpi"), 0);
	device_get_children(dev, &devlist, &numdevs);
	for (i = 0; i < numdevs; i++) {
		child = devlist[i];
		dinfo = (struct pci_devinfo *) device_get_ivars(child);
		pci_cfg_save(child, dinfo, 0);
	}

	/* Suspend devices before potentially powering them down. */
	error = bus_generic_suspend(dev);
	if (error) {
		free(devlist, M_TEMP);
		return (error);
	}

	/*
	 * Always set the device to D3.  If ACPI suggests a different
	 * power state, use it instead.  If ACPI is not present, the
	 * firmware is responsible for managing device power.  Skip
	 * children who aren't attached since they are powered down
	 * separately.  Only manage type 0 devices for now.
	 */
	for (i = 0; acpi_dev && i < numdevs; i++) {
		child = devlist[i];
		dinfo = (struct pci_devinfo *) device_get_ivars(child);
		if (device_is_attached(child) && dinfo->cfg.hdrtype == 0) {
			dstate = PCI_POWERSTATE_D3;
			ACPI_PWR_FOR_SLEEP(acpi_dev, child, &dstate);
			pci_set_powerstate(child, dstate);
		}
	}
	free(devlist, M_TEMP);
	return (0);
}
2055
/*
 * Bus resume method: power each child back to D0 (via ACPI, when
 * present and pci_do_power_resume is set), restore its saved config
 * space, and then resume the children generically.
 */
int
pci_resume(device_t dev)
{
	int i, numdevs;
	device_t acpi_dev, child, *devlist;
	struct pci_devinfo *dinfo;

	/*
	 * Set each child to D0 and restore its PCI configuration space.
	 */
	acpi_dev = NULL;
	if (pci_do_power_resume)
		acpi_dev = devclass_get_device(devclass_find("acpi"), 0);
	device_get_children(dev, &devlist, &numdevs);
	for (i = 0; i < numdevs; i++) {
		/*
		 * Notify ACPI we're going to D0 but ignore the result.  If
		 * ACPI is not present, the firmware is responsible for
		 * managing device power.  Only manage type 0 devices for now.
		 */
		child = devlist[i];
		dinfo = (struct pci_devinfo *) device_get_ivars(child);
		if (acpi_dev && device_is_attached(child) &&
		    dinfo->cfg.hdrtype == 0) {
			ACPI_PWR_FOR_SLEEP(acpi_dev, child, NULL);
			pci_set_powerstate(child, PCI_POWERSTATE_D0);
		}

		/* Now the device is powered up, restore its config space. */
		pci_cfg_restore(child, dinfo);
	}
	free(devlist, M_TEMP);
	return (bus_generic_resume(dev));
}
2090
2091 static void
2092 pci_load_vendor_data(void)
2093 {
2094 caddr_t vendordata, info;
2095
2096 if ((vendordata = preload_search_by_type("pci_vendor_data")) != NULL) {
2097 info = preload_search_info(vendordata, MODINFO_ADDR);
2098 pci_vendordata = *(char **)info;
2099 info = preload_search_info(vendordata, MODINFO_SIZE);
2100 pci_vendordata_size = *(size_t *)info;
2101 /* terminate the database */
2102 pci_vendordata[pci_vendordata_size] = '\n';
2103 }
2104 }
2105
2106 void
2107 pci_driver_added(device_t dev, driver_t *driver)
2108 {
2109 int numdevs;
2110 device_t *devlist;
2111 device_t child;
2112 struct pci_devinfo *dinfo;
2113 int i;
2114
2115 if (bootverbose)
2116 device_printf(dev, "driver added\n");
2117 DEVICE_IDENTIFY(driver, dev);
2118 device_get_children(dev, &devlist, &numdevs);
2119 for (i = 0; i < numdevs; i++) {
2120 child = devlist[i];
2121 if (device_get_state(child) != DS_NOTPRESENT)
2122 continue;
2123 dinfo = device_get_ivars(child);
2124 pci_print_verbose(dinfo);
2125 if (bootverbose)
2126 printf("pci%d:%d:%d: reprobing on driver added\n",
2127 dinfo->cfg.bus, dinfo->cfg.slot, dinfo->cfg.func);
2128 pci_cfg_restore(child, dinfo);
2129 if (device_probe_and_attach(child) != 0)
2130 pci_cfg_save(child, dinfo, 1);
2131 }
2132 free(devlist, M_TEMP);
2133 }
2134
/*
 * BUS_SETUP_INTR method.  Installs the handler via the generic bus code
 * and then, for a direct child using MSI or MSI-X (rid > 0), asks the
 * parent bridge to map the message so the address/data pair can be
 * programmed into the device.  Per-message handler counts are kept in
 * the devinfo so teardown knows when to disable/mask messages.
 */
int
pci_setup_intr(device_t dev, device_t child, struct resource *irq, int flags,
    driver_intr_t *intr, void *arg, void **cookiep)
{
	struct pci_devinfo *dinfo;
	struct msix_table_entry *mte;
	struct msix_vector *mv;
	uint64_t addr;
	uint32_t data;
	void *cookie;
	int error, rid;

	error = bus_generic_setup_intr(dev, child, irq, flags, intr, arg,
	    &cookie);
	if (error)
		return (error);

	/*
	 * If this is a direct child, check to see if the interrupt is
	 * MSI or MSI-X.  If so, ask our parent to map the MSI and give
	 * us the address and data register values.  If we fail for some
	 * reason, teardown the interrupt handler.
	 */
	rid = rman_get_rid(irq);
	if (device_get_parent(child) == dev && rid > 0) {
		dinfo = device_get_ivars(child);
		if (dinfo->cfg.msi.msi_alloc > 0) {
			/* MSI: map lazily on the first handler. */
			if (dinfo->cfg.msi.msi_addr == 0) {
				KASSERT(dinfo->cfg.msi.msi_handlers == 0,
			    ("MSI has handlers, but vectors not mapped"));
				error = PCIB_MAP_MSI(device_get_parent(dev),
				    child, rman_get_start(irq), &addr, &data);
				if (error)
					goto bad;
				dinfo->cfg.msi.msi_addr = addr;
				dinfo->cfg.msi.msi_data = data;
				pci_enable_msi(child, addr, data);
			}
			dinfo->cfg.msi.msi_handlers++;
		} else {
			/* MSI-X: rid N maps to table entry N - 1. */
			KASSERT(dinfo->cfg.msix.msix_alloc > 0,
			    ("No MSI or MSI-X interrupts allocated"));
			KASSERT(rid <= dinfo->cfg.msix.msix_table_len,
			    ("MSI-X index too high"));
			mte = &dinfo->cfg.msix.msix_table[rid - 1];
			KASSERT(mte->mte_vector != 0, ("no message vector"));
			mv = &dinfo->cfg.msix.msix_vectors[mte->mte_vector - 1];
			KASSERT(mv->mv_irq == rman_get_start(irq),
			    ("IRQ mismatch"));
			/* Map the vector lazily, once per message. */
			if (mv->mv_address == 0) {
				KASSERT(mte->mte_handlers == 0,
		    ("MSI-X table entry has handlers, but vector not mapped"));
				error = PCIB_MAP_MSI(device_get_parent(dev),
				    child, rman_get_start(irq), &addr, &data);
				if (error)
					goto bad;
				mv->mv_address = addr;
				mv->mv_data = data;
			}
			/* Program and unmask on the first handler only. */
			if (mte->mte_handlers == 0) {
				pci_enable_msix(child, rid - 1, mv->mv_address,
				    mv->mv_data);
				pci_unmask_msix(child, rid - 1);
			}
			mte->mte_handlers++;
		}
	bad:
		/* On mapping failure, undo the generic setup above. */
		if (error) {
			(void)bus_generic_teardown_intr(dev, child, irq,
			    cookie);
			return (error);
		}
	}
	*cookiep = cookie;
	return (0);
}
2211
/*
 * BUS_TEARDOWN_INTR method.  Mirror of pci_setup_intr(): for a direct
 * child using MSI/MSI-X, drop the per-message handler count and disable
 * the MSI messages (or mask the MSI-X entry) when it reaches zero, then
 * tear down the handler through the generic bus code.
 */
int
pci_teardown_intr(device_t dev, device_t child, struct resource *irq,
    void *cookie)
{
	struct msix_table_entry *mte;
	struct resource_list_entry *rle;
	struct pci_devinfo *dinfo;
	int error, rid;

	/*
	 * If this is a direct child, check to see if the interrupt is
	 * MSI or MSI-X.  If so, decrement the appropriate handlers
	 * count and mask the MSI-X message, or disable MSI messages
	 * if the count drops to 0.
	 */
	if (irq == NULL || !(rman_get_flags(irq) & RF_ACTIVE))
		return (EINVAL);
	rid = rman_get_rid(irq);
	if (device_get_parent(child) == dev && rid > 0) {
		dinfo = device_get_ivars(child);
		/* The resource must be the one we handed out for this rid. */
		rle = resource_list_find(&dinfo->resources, SYS_RES_IRQ, rid);
		if (rle->res != irq)
			return (EINVAL);
		if (dinfo->cfg.msi.msi_alloc > 0) {
			/* NOTE(review): message says "MSI-X" but this is the
			 * MSI branch — looks like a copy/paste of the text. */
			KASSERT(rid <= dinfo->cfg.msi.msi_alloc,
			    ("MSI-X index too high"));
			if (dinfo->cfg.msi.msi_handlers == 0)
				return (EINVAL);
			dinfo->cfg.msi.msi_handlers--;
			if (dinfo->cfg.msi.msi_handlers == 0)
				pci_disable_msi(child);
		} else {
			KASSERT(dinfo->cfg.msix.msix_alloc > 0,
			    ("No MSI or MSI-X interrupts allocated"));
			KASSERT(rid <= dinfo->cfg.msix.msix_table_len,
			    ("MSI-X index too high"));
			mte = &dinfo->cfg.msix.msix_table[rid - 1];
			if (mte->mte_handlers == 0)
				return (EINVAL);
			mte->mte_handlers--;
			if (mte->mte_handlers == 0)
				pci_mask_msix(child, rid - 1);
		}
	}
	error = bus_generic_teardown_intr(dev, child, irq, cookie);
	/* The message state was already torn down above, so the generic
	 * teardown must not fail for MSI/MSI-X interrupts. */
	if (device_get_parent(child) == dev && rid > 0)
		KASSERT(error == 0,
		    ("%s: generic teardown failed for MSI/MSI-X", __func__));
	return (error);
}
2262
2263 int
2264 pci_print_child(device_t dev, device_t child)
2265 {
2266 struct pci_devinfo *dinfo;
2267 struct resource_list *rl;
2268 int retval = 0;
2269
2270 dinfo = device_get_ivars(child);
2271 rl = &dinfo->resources;
2272
2273 retval += bus_print_child_header(dev, child);
2274
2275 retval += resource_list_print_type(rl, "port", SYS_RES_IOPORT, "%#lx");
2276 retval += resource_list_print_type(rl, "mem", SYS_RES_MEMORY, "%#lx");
2277 retval += resource_list_print_type(rl, "irq", SYS_RES_IRQ, "%ld");
2278 if (device_get_flags(dev))
2279 retval += printf(" flags %#x", device_get_flags(dev));
2280
2281 retval += printf(" at device %d.%d", pci_get_slot(child),
2282 pci_get_function(child));
2283
2284 retval += bus_print_child_footer(dev, child);
2285
2286 return (retval);
2287 }
2288
/*
 * Fallback class/subclass description table used by pci_probe_nomatch()
 * when the loaded vendor database has no entry for a device.  A
 * subclass of -1 provides the generic description for the whole class;
 * the scan keeps both the class and subclass matches so the output can
 * combine them.  The list is terminated by a NULL desc.
 */
static struct
{
	int	class;		/* PCI base class (PCIC_*) */
	int	subclass;	/* PCI subclass (PCIS_*) or -1 for the class */
	char	*desc;		/* human-readable description */
} pci_nomatch_tab[] = {
	{PCIC_OLD,		-1,			"old"},
	{PCIC_OLD,		PCIS_OLD_NONVGA,	"non-VGA display device"},
	{PCIC_OLD,		PCIS_OLD_VGA,		"VGA-compatible display device"},
	{PCIC_STORAGE,		-1,			"mass storage"},
	{PCIC_STORAGE,		PCIS_STORAGE_SCSI,	"SCSI"},
	{PCIC_STORAGE,		PCIS_STORAGE_IDE,	"ATA"},
	{PCIC_STORAGE,		PCIS_STORAGE_FLOPPY,	"floppy disk"},
	{PCIC_STORAGE,		PCIS_STORAGE_IPI,	"IPI"},
	{PCIC_STORAGE,		PCIS_STORAGE_RAID,	"RAID"},
	{PCIC_NETWORK,		-1,			"network"},
	{PCIC_NETWORK,		PCIS_NETWORK_ETHERNET,	"ethernet"},
	{PCIC_NETWORK,		PCIS_NETWORK_TOKENRING,	"token ring"},
	{PCIC_NETWORK,		PCIS_NETWORK_FDDI,	"fddi"},
	{PCIC_NETWORK,		PCIS_NETWORK_ATM,	"ATM"},
	{PCIC_NETWORK,		PCIS_NETWORK_ISDN,	"ISDN"},
	{PCIC_DISPLAY,		-1,			"display"},
	{PCIC_DISPLAY,		PCIS_DISPLAY_VGA,	"VGA"},
	{PCIC_DISPLAY,		PCIS_DISPLAY_XGA,	"XGA"},
	{PCIC_DISPLAY,		PCIS_DISPLAY_3D,	"3D"},
	{PCIC_MULTIMEDIA,	-1,			"multimedia"},
	{PCIC_MULTIMEDIA,	PCIS_MULTIMEDIA_VIDEO,	"video"},
	{PCIC_MULTIMEDIA,	PCIS_MULTIMEDIA_AUDIO,	"audio"},
	{PCIC_MULTIMEDIA,	PCIS_MULTIMEDIA_TELE,	"telephony"},
	{PCIC_MEMORY,		-1,			"memory"},
	{PCIC_MEMORY,		PCIS_MEMORY_RAM,	"RAM"},
	{PCIC_MEMORY,		PCIS_MEMORY_FLASH,	"flash"},
	{PCIC_BRIDGE,		-1,			"bridge"},
	{PCIC_BRIDGE,		PCIS_BRIDGE_HOST,	"HOST-PCI"},
	{PCIC_BRIDGE,		PCIS_BRIDGE_ISA,	"PCI-ISA"},
	{PCIC_BRIDGE,		PCIS_BRIDGE_EISA,	"PCI-EISA"},
	{PCIC_BRIDGE,		PCIS_BRIDGE_MCA,	"PCI-MCA"},
	{PCIC_BRIDGE,		PCIS_BRIDGE_PCI,	"PCI-PCI"},
	{PCIC_BRIDGE,		PCIS_BRIDGE_PCMCIA,	"PCI-PCMCIA"},
	{PCIC_BRIDGE,		PCIS_BRIDGE_NUBUS,	"PCI-NuBus"},
	{PCIC_BRIDGE,		PCIS_BRIDGE_CARDBUS,	"PCI-CardBus"},
	{PCIC_BRIDGE,		PCIS_BRIDGE_RACEWAY,	"PCI-RACEway"},
	{PCIC_SIMPLECOMM,	-1,			"simple comms"},
	{PCIC_SIMPLECOMM,	PCIS_SIMPLECOMM_UART,	"UART"},	/* could detect 16550 */
	{PCIC_SIMPLECOMM,	PCIS_SIMPLECOMM_PAR,	"parallel port"},
	{PCIC_SIMPLECOMM,	PCIS_SIMPLECOMM_MULSER,	"multiport serial"},
	{PCIC_SIMPLECOMM,	PCIS_SIMPLECOMM_MODEM,	"generic modem"},
	{PCIC_BASEPERIPH,	-1,			"base peripheral"},
	{PCIC_BASEPERIPH,	PCIS_BASEPERIPH_PIC,	"interrupt controller"},
	{PCIC_BASEPERIPH,	PCIS_BASEPERIPH_DMA,	"DMA controller"},
	{PCIC_BASEPERIPH,	PCIS_BASEPERIPH_TIMER,	"timer"},
	{PCIC_BASEPERIPH,	PCIS_BASEPERIPH_RTC,	"realtime clock"},
	{PCIC_BASEPERIPH,	PCIS_BASEPERIPH_PCIHOT,	"PCI hot-plug controller"},
	{PCIC_INPUTDEV,		-1,			"input device"},
	{PCIC_INPUTDEV,		PCIS_INPUTDEV_KEYBOARD,	"keyboard"},
	{PCIC_INPUTDEV,		PCIS_INPUTDEV_DIGITIZER,"digitizer"},
	{PCIC_INPUTDEV,		PCIS_INPUTDEV_MOUSE,	"mouse"},
	{PCIC_INPUTDEV,		PCIS_INPUTDEV_SCANNER,	"scanner"},
	{PCIC_INPUTDEV,		PCIS_INPUTDEV_GAMEPORT,	"gameport"},
	{PCIC_DOCKING,		-1,			"docking station"},
	{PCIC_PROCESSOR,	-1,			"processor"},
	{PCIC_SERIALBUS,	-1,			"serial bus"},
	{PCIC_SERIALBUS,	PCIS_SERIALBUS_FW,	"FireWire"},
	{PCIC_SERIALBUS,	PCIS_SERIALBUS_ACCESS,	"AccessBus"},
	{PCIC_SERIALBUS,	PCIS_SERIALBUS_SSA,	"SSA"},
	{PCIC_SERIALBUS,	PCIS_SERIALBUS_USB,	"USB"},
	{PCIC_SERIALBUS,	PCIS_SERIALBUS_FC,	"Fibre Channel"},
	{PCIC_SERIALBUS,	PCIS_SERIALBUS_SMBUS,	"SMBus"},
	{PCIC_WIRELESS,		-1,			"wireless controller"},
	{PCIC_WIRELESS,		PCIS_WIRELESS_IRDA,	"iRDA"},
	{PCIC_WIRELESS,		PCIS_WIRELESS_IR,	"IR"},
	{PCIC_WIRELESS,		PCIS_WIRELESS_RF,	"RF"},
	{PCIC_INTELLIIO,	-1,			"intelligent I/O controller"},
	{PCIC_INTELLIIO,	PCIS_INTELLIIO_I2O,	"I2O"},
	{PCIC_SATCOM,		-1,			"satellite communication"},
	{PCIC_SATCOM,		PCIS_SATCOM_TV,		"sat TV"},
	{PCIC_SATCOM,		PCIS_SATCOM_AUDIO,	"sat audio"},
	{PCIC_SATCOM,		PCIS_SATCOM_VOICE,	"sat voice"},
	{PCIC_SATCOM,		PCIS_SATCOM_DATA,	"sat data"},
	{PCIC_CRYPTO,		-1,			"encrypt/decrypt"},
	{PCIC_CRYPTO,		PCIS_CRYPTO_NETCOMP,	"network/computer crypto"},
	{PCIC_CRYPTO,		PCIS_CRYPTO_ENTERTAIN,	"entertainment crypto"},
	{PCIC_DASP,		-1,			"dasp"},
	{PCIC_DASP,		PCIS_DASP_DPIO,		"DPIO module"},
	{0, 0, NULL}
};
2375
2376 void
2377 pci_probe_nomatch(device_t dev, device_t child)
2378 {
2379 int i;
2380 char *cp, *scp, *device;
2381
2382 /*
2383 * Look for a listing for this device in a loaded device database.
2384 */
2385 if ((device = pci_describe_device(child)) != NULL) {
2386 device_printf(dev, "<%s>", device);
2387 free(device, M_DEVBUF);
2388 } else {
2389 /*
2390 * Scan the class/subclass descriptions for a general
2391 * description.
2392 */
2393 cp = "unknown";
2394 scp = NULL;
2395 for (i = 0; pci_nomatch_tab[i].desc != NULL; i++) {
2396 if (pci_nomatch_tab[i].class == pci_get_class(child)) {
2397 if (pci_nomatch_tab[i].subclass == -1) {
2398 cp = pci_nomatch_tab[i].desc;
2399 } else if (pci_nomatch_tab[i].subclass ==
2400 pci_get_subclass(child)) {
2401 scp = pci_nomatch_tab[i].desc;
2402 }
2403 }
2404 }
2405 device_printf(dev, "<%s%s%s>",
2406 cp ? cp : "",
2407 ((cp != NULL) && (scp != NULL)) ? ", " : "",
2408 scp ? scp : "");
2409 }
2410 printf(" at device %d.%d (no driver attached)\n",
2411 pci_get_slot(child), pci_get_function(child));
2412 if (pci_do_power_nodriver)
2413 pci_cfg_save(child,
2414 (struct pci_devinfo *) device_get_ivars(child), 1);
2415 return;
2416 }
2417
2418 /*
2419 * Parse the PCI device database, if loaded, and return a pointer to a
2420 * description of the device.
2421 *
2422 * The database is flat text formatted as follows:
2423 *
2424 * Any line not in a valid format is ignored.
2425 * Lines are terminated with newline '\n' characters.
2426 *
2427 * A VENDOR line consists of the 4 digit (hex) vendor code, a TAB, then
2428 * the vendor name.
2429 *
2430 * A DEVICE line is entered immediately below the corresponding VENDOR ID.
2431 * - devices cannot be listed without a corresponding VENDOR line.
2432 * A DEVICE line consists of a TAB, the 4 digit (hex) device code,
2433 * another TAB, then the device name.
2434 */
2435
2436 /*
2437 * Assuming (ptr) points to the beginning of a line in the database,
2438 * return the vendor or device and description of the next entry.
2439 * The value of (vendor) or (device) inappropriate for the entry type
2440 * is set to -1. Returns nonzero at the end of the database.
2441 *
2442 * Note that this is slightly unrobust in the face of corrupt data;
2443 * we attempt to safeguard against this by spamming the end of the
2444 * database with a newline when we initialise.
2445 */
2446 static int
2447 pci_describe_parse_line(char **ptr, int *vendor, int *device, char **desc)
2448 {
2449 char *cp = *ptr;
2450 int left;
2451
2452 *device = -1;
2453 *vendor = -1;
2454 **desc = '\0';
2455 for (;;) {
2456 left = pci_vendordata_size - (cp - pci_vendordata);
2457 if (left <= 0) {
2458 *ptr = cp;
2459 return(1);
2460 }
2461
2462 /* vendor entry? */
2463 if (*cp != '\t' &&
2464 sscanf(cp, "%x\t%80[^\n]", vendor, *desc) == 2)
2465 break;
2466 /* device entry? */
2467 if (*cp == '\t' &&
2468 sscanf(cp, "%x\t%80[^\n]", device, *desc) == 2)
2469 break;
2470
2471 /* skip to next line */
2472 while (*cp != '\n' && left > 0) {
2473 cp++;
2474 left--;
2475 }
2476 if (*cp == '\n') {
2477 cp++;
2478 left--;
2479 }
2480 }
2481 /* skip to next line */
2482 while (*cp != '\n' && left > 0) {
2483 cp++;
2484 left--;
2485 }
2486 if (*cp == '\n' && left > 0)
2487 cp++;
2488 *ptr = cp;
2489 return(0);
2490 }
2491
/*
 * Look up (dev)'s vendor and device IDs in the preloaded database and
 * return a malloc'd "<vendor>, <device>" description string, or NULL if
 * no database is loaded, the vendor is unknown, or allocation fails.
 * The caller must free the result with M_DEVBUF.
 */
static char *
pci_describe_device(device_t dev)
{
	int vendor, device;
	char *desc, *vp, *dp, *line;

	desc = vp = dp = NULL;

	/*
	 * If we have no vendor data, we can't do anything.
	 */
	if (pci_vendordata == NULL)
		goto out;

	/*
	 * Scan the vendor data looking for this device
	 */
	line = pci_vendordata;
	if ((vp = malloc(80, M_DEVBUF, M_NOWAIT)) == NULL)
		goto out;
	for (;;) {
		if (pci_describe_parse_line(&line, &vendor, &device, &vp))
			goto out;
		if (vendor == pci_get_vendor(dev))
			break;
	}
	/* Vendor found; scan its device entries until the next vendor. */
	if ((dp = malloc(80, M_DEVBUF, M_NOWAIT)) == NULL)
		goto out;
	for (;;) {
		if (pci_describe_parse_line(&line, &vendor, &device, &dp)) {
			/* End of database: no device match. */
			*dp = 0;
			break;
		}
		if (vendor != -1) {
			/* Ran into the next vendor: no device match. */
			*dp = 0;
			break;
		}
		if (device == pci_get_device(dev))
			break;
	}
	/* Unknown device under a known vendor: show the raw ID. */
	if (dp[0] == '\0')
		snprintf(dp, 80, "0x%x", pci_get_device(dev));
	if ((desc = malloc(strlen(vp) + strlen(dp) + 3, M_DEVBUF, M_NOWAIT)) !=
	    NULL)
		sprintf(desc, "%s, %s", vp, dp);
 out:
	if (vp != NULL)
		free(vp, M_DEVBUF);
	if (dp != NULL)
		free(dp, M_DEVBUF);
	return(desc);
}
2544
/*
 * BUS_READ_IVAR method.  Returns the requested instance variable from
 * the child's cached config-space registers.  Returns ENOENT for an
 * unknown ivar; PCI_IVAR_ETHADDR always fails with EINVAL here (no
 * generic way to supply a MAC address at this layer).
 */
int
pci_read_ivar(device_t dev, device_t child, int which, uintptr_t *result)
{
	struct pci_devinfo *dinfo;
	pcicfgregs *cfg;

	dinfo = device_get_ivars(child);
	cfg = &dinfo->cfg;

	switch (which) {
	case PCI_IVAR_ETHADDR:
		/*
		 * The generic accessor doesn't deal with failure, so
		 * we set the return value, then return an error.
		 */
		*((uint8_t **) result) = NULL;
		return (EINVAL);
	case PCI_IVAR_SUBVENDOR:
		*result = cfg->subvendor;
		break;
	case PCI_IVAR_SUBDEVICE:
		*result = cfg->subdevice;
		break;
	case PCI_IVAR_VENDOR:
		*result = cfg->vendor;
		break;
	case PCI_IVAR_DEVICE:
		*result = cfg->device;
		break;
	case PCI_IVAR_DEVID:
		/* Combined device:vendor ID, matching config word 0 layout. */
		*result = (cfg->device << 16) | cfg->vendor;
		break;
	case PCI_IVAR_CLASS:
		*result = cfg->baseclass;
		break;
	case PCI_IVAR_SUBCLASS:
		*result = cfg->subclass;
		break;
	case PCI_IVAR_PROGIF:
		*result = cfg->progif;
		break;
	case PCI_IVAR_REVID:
		*result = cfg->revid;
		break;
	case PCI_IVAR_INTPIN:
		*result = cfg->intpin;
		break;
	case PCI_IVAR_IRQ:
		*result = cfg->intline;
		break;
	case PCI_IVAR_BUS:
		*result = cfg->bus;
		break;
	case PCI_IVAR_SLOT:
		*result = cfg->slot;
		break;
	case PCI_IVAR_FUNCTION:
		*result = cfg->func;
		break;
	case PCI_IVAR_CMDREG:
		*result = cfg->cmdreg;
		break;
	case PCI_IVAR_CACHELNSZ:
		*result = cfg->cachelnsz;
		break;
	case PCI_IVAR_MINGNT:
		*result = cfg->mingnt;
		break;
	case PCI_IVAR_MAXLAT:
		*result = cfg->maxlat;
		break;
	case PCI_IVAR_LATTIMER:
		*result = cfg->lattimer;
		break;
	default:
		return (ENOENT);
	}
	return (0);
}
2624
2625 int
2626 pci_write_ivar(device_t dev, device_t child, int which, uintptr_t value)
2627 {
2628 struct pci_devinfo *dinfo;
2629
2630 dinfo = device_get_ivars(child);
2631
2632 switch (which) {
2633 case PCI_IVAR_INTPIN:
2634 dinfo->cfg.intpin = value;
2635 return (0);
2636 case PCI_IVAR_ETHADDR:
2637 case PCI_IVAR_SUBVENDOR:
2638 case PCI_IVAR_SUBDEVICE:
2639 case PCI_IVAR_VENDOR:
2640 case PCI_IVAR_DEVICE:
2641 case PCI_IVAR_DEVID:
2642 case PCI_IVAR_CLASS:
2643 case PCI_IVAR_SUBCLASS:
2644 case PCI_IVAR_PROGIF:
2645 case PCI_IVAR_REVID:
2646 case PCI_IVAR_IRQ:
2647 case PCI_IVAR_BUS:
2648 case PCI_IVAR_SLOT:
2649 case PCI_IVAR_FUNCTION:
2650 return (EINVAL); /* disallow for now */
2651
2652 default:
2653 return (ENOENT);
2654 }
2655 }
2656
2657
2658 #include "opt_ddb.h"
2659 #ifdef DDB
2660 #include <ddb/ddb.h>
2661 #include <sys/cons.h>
2662
2663 /*
2664 * List resources based on pci map registers, used for within ddb
2665 */
2666
/*
 * DDB "show pciregs" command: walks the global pci_devq list and prints
 * one line per device (driver name/unit or "none<N>", bus address,
 * class, subsystem/chip IDs, revision and header type), paginated
 * through the simple pager.
 */
DB_SHOW_COMMAND(pciregs, db_pci_dump)
{
	struct pci_devinfo *dinfo;
	struct devlist *devlist_head;
	struct pci_conf *p;
	const char *name;
	int i, error, none_count, quit;

	none_count = 0;
	/* get the head of the device queue */
	devlist_head = &pci_devq;

	/*
	 * Go through the list of devices and print out devices
	 */
	db_setup_paging(db_simple_pager, &quit, db_lines_per_page);
	for (error = 0, i = 0, quit = 0,
	     dinfo = STAILQ_FIRST(devlist_head);
	     (dinfo != NULL) && (error == 0) && (i < pci_numdevs) && !quit;
	     dinfo = STAILQ_NEXT(dinfo, pci_links), i++) {

		/* Populate pd_name and pd_unit */
		name = NULL;
		if (dinfo->cfg.dev)
			name = device_get_name(dinfo->cfg.dev);

		p = &dinfo->conf;
		/* Driverless devices are labeled "none" with a counter. */
		db_printf("%s%d@pci%d:%d:%d:\tclass=0x%06x card=0x%08x "
			"chip=0x%08x rev=0x%02x hdr=0x%02x\n",
			(name && *name) ? name : "none",
			(name && *name) ? (int)device_get_unit(dinfo->cfg.dev) :
			none_count++,
			p->pc_sel.pc_bus, p->pc_sel.pc_dev,
			p->pc_sel.pc_func, (p->pc_class << 16) |
			(p->pc_subclass << 8) | p->pc_progif,
			(p->pc_subdevice << 16) | p->pc_subvendor,
			(p->pc_device << 16) | p->pc_vendor,
			p->pc_revid, p->pc_hdr);
	}
}
2707 #endif /* DDB */
2708
/*
 * Lazily allocate a resource for a BAR the child has not been assigned
 * one for.  Sizes the BAR by the standard write-all-ones probe, forces
 * the request to the BAR's real size/alignment, allocates from the
 * parent, records the result in the child's resource list, and writes
 * the allocated base address back into the BAR.  Returns NULL on any
 * failure; the original BAR contents are restored in that case (the
 * probe clobbered them).
 */
static struct resource *
pci_alloc_map(device_t dev, device_t child, int type, int *rid,
    u_long start, u_long end, u_long count, u_int flags)
{
	struct pci_devinfo *dinfo = device_get_ivars(child);
	struct resource_list *rl = &dinfo->resources;
	struct resource_list_entry *rle;
	struct resource *res;
	uint32_t map, testval;
	int mapsize;

	/*
	 * Weed out the bogons, and figure out how large the BAR/map
	 * is.  Bars that read back 0 here are bogus and unimplemented.
	 * Note: atapci in legacy mode are special and handled elsewhere
	 * in the code.  If you have a atapci device in legacy mode and
	 * it fails here, that other code is broken.
	 */
	res = NULL;
	/* Save the BAR, then write all-ones to discover its size mask. */
	map = pci_read_config(child, *rid, 4);
	pci_write_config(child, *rid, 0xffffffff, 4);
	testval = pci_read_config(child, *rid, 4);
	if (pci_mapbase(testval) == 0)
		goto out;
	/* The BAR's own type must agree with the requested resource type. */
	if (pci_maptype(testval) & PCI_MAPMEM) {
		if (type != SYS_RES_MEMORY) {
			if (bootverbose)
				device_printf(dev,
				    "child %s requested type %d for rid %#x,"
				    " but the BAR says it is an memio\n",
				    device_get_nameunit(child), type, *rid);
			goto out;
		}
	} else {
		if (type != SYS_RES_IOPORT) {
			if (bootverbose)
				device_printf(dev,
				    "child %s requested type %d for rid %#x,"
				    " but the BAR says it is an ioport\n",
				    device_get_nameunit(child), type, *rid);
			goto out;
		}
	}
	/*
	 * For real BARs, we need to override the size that
	 * the driver requests, because that's what the BAR
	 * actually uses and we would otherwise have a
	 * situation where we might allocate the excess to
	 * another driver, which won't work.
	 */
	mapsize = pci_mapsize(testval);
	count = 1 << mapsize;
	/* BARs must be aligned to at least their own size. */
	if (RF_ALIGNMENT(flags) < mapsize)
		flags = (flags & ~RF_ALIGNMENT_MASK) | RF_ALIGNMENT_LOG2(mapsize);

	/*
	 * Allocate enough resource, and then write back the
	 * appropriate bar for that resource.
	 */
	res = BUS_ALLOC_RESOURCE(device_get_parent(dev), child, type, rid,
	    start, end, count, flags);
	if (res == NULL) {
		device_printf(child,
		    "%#lx bytes of rid %#x res %d failed (%#lx, %#lx).\n",
		    count, *rid, type, start, end);
		goto out;
	}
	resource_list_add(rl, type, *rid, start, end, count);
	rle = resource_list_find(rl, type, *rid);
	if (rle == NULL)
		panic("pci_alloc_map: unexpectedly can't find resource.");
	rle->res = res;
	rle->start = rman_get_start(res);
	rle->end = rman_get_end(res);
	rle->count = count;
	if (bootverbose)
		device_printf(child,
		    "Lazy allocation of %#lx bytes rid %#x type %d at %#lx\n",
		    count, *rid, type, rman_get_start(res));
	/* Program the newly-assigned base into the BAR. */
	map = rman_get_start(res);
out:;
	/* Either the new base or the saved original is written back. */
	pci_write_config(child, *rid, map, 4);
	return (res);
}
2793
2794
/*
 * BUS_ALLOC_RESOURCE method.  For direct children this performs the
 * lazy PCI-specific work first: routing a legacy interrupt on demand
 * (refusing rid 0 once MSI/MSI-X messages exist), enabling I/O/memory
 * decoding and sizing BARs via pci_alloc_map(), and returning an
 * already-reserved resource (activating it if requested).  Everything
 * else falls through to the generic resource list allocator.
 */
struct resource *
pci_alloc_resource(device_t dev, device_t child, int type, int *rid,
    u_long start, u_long end, u_long count, u_int flags)
{
	struct pci_devinfo *dinfo = device_get_ivars(child);
	struct resource_list *rl = &dinfo->resources;
	struct resource_list_entry *rle;
	pcicfgregs *cfg = &dinfo->cfg;

	/*
	 * Perform lazy resource allocation
	 */
	if (device_get_parent(child) == dev) {
		switch (type) {
		case SYS_RES_IRQ:
			/*
			 * Can't alloc legacy interrupt once MSI messages
			 * have been allocated.
			 */
			if (*rid == 0 && (cfg->msi.msi_alloc > 0 ||
			    cfg->msix.msix_alloc > 0))
				return (NULL);
			/*
			 * If the child device doesn't have an
			 * interrupt routed and is deserving of an
			 * interrupt, try to assign it one.
			 */
			if (*rid == 0 && !PCI_INTERRUPT_VALID(cfg->intline) &&
			    (cfg->intpin != 0))
				pci_assign_interrupt(dev, child, 0);
			break;
		case SYS_RES_IOPORT:
		case SYS_RES_MEMORY:
			if (*rid < PCIR_BAR(cfg->nummaps)) {
				/*
				 * Enable the I/O mode.  We should
				 * also be assigning resources too
				 * when none are present.  The
				 * resource_list_alloc kind of sorta does
				 * this...
				 */
				if (PCI_ENABLE_IO(dev, child, type))
					return (NULL);
			}
			rle = resource_list_find(rl, type, *rid);
			/* Unknown BAR: size and allocate it lazily. */
			if (rle == NULL)
				return (pci_alloc_map(dev, child, type, rid,
				    start, end, count, flags));
			break;
		}
		/*
		 * If we've already allocated the resource, then
		 * return it now.  But first we may need to activate
		 * it, since we don't allocate the resource as active
		 * above.  Normally this would be done down in the
		 * nexus, but since we short-circuit that path we have
		 * to do its job here.  Not sure if we should free the
		 * resource if it fails to activate.
		 */
		rle = resource_list_find(rl, type, *rid);
		if (rle != NULL && rle->res != NULL) {
			if (bootverbose)
				device_printf(child,
			    "Reserved %#lx bytes for rid %#x type %d at %#lx\n",
				    rman_get_size(rle->res), *rid, type,
				    rman_get_start(rle->res));
			if ((flags & RF_ACTIVE) &&
			    bus_generic_activate_resource(dev, child, type,
			    *rid, rle->res) != 0)
				return NULL;
			return (rle->res);
		}
	}
	return (resource_list_alloc(rl, dev, child, type, rid,
	    start, end, count, flags));
}
2871
/*
 * BUS_DELETE_RESOURCE method.  Removes a direct child's resource-list
 * entry, releasing the underlying resource first (but refusing while it
 * is still active or owned by the child), zeroes the matching config
 * register and forwards the deletion to the parent.
 */
void
pci_delete_resource(device_t dev, device_t child, int type, int rid)
{
	struct pci_devinfo *dinfo;
	struct resource_list *rl;
	struct resource_list_entry *rle;

	/* Only resources we manage for our own children can be deleted. */
	if (device_get_parent(child) != dev)
		return;

	dinfo = device_get_ivars(child);
	rl = &dinfo->resources;
	rle = resource_list_find(rl, type, rid);
	if (rle) {
		if (rle->res) {
			/* Refuse while the resource is in use by the child. */
			if (rman_get_device(rle->res) != dev ||
			    rman_get_flags(rle->res) & RF_ACTIVE) {
				device_printf(dev, "delete_resource: "
				    "Resource still owned by child, oops. "
				    "(type=%d, rid=%d, addr=%lx)\n",
				    rle->type, rle->rid,
				    rman_get_start(rle->res));
				return;
			}
			bus_release_resource(dev, type, rid, rle->res);
		}
		resource_list_delete(rl, type, rid);
	}
	/*
	 * Why do we turn off the PCI configuration BAR when we delete a
	 * resource? -- imp
	 */
	/* NOTE(review): this writes config offset 'rid' unconditionally,
	 * even for non-BAR rids — presumably intentional for BARs only;
	 * verify before relying on it for other resource types. */
	pci_write_config(child, rid, 0, 4);
	BUS_DELETE_RESOURCE(device_get_parent(dev), child, type, rid);
}
2907
2908 struct resource_list *
2909 pci_get_resource_list (device_t dev, device_t child)
2910 {
2911 struct pci_devinfo *dinfo = device_get_ivars(child);
2912
2913 return (&dinfo->resources);
2914 }
2915
2916 uint32_t
2917 pci_read_config_method(device_t dev, device_t child, int reg, int width)
2918 {
2919 struct pci_devinfo *dinfo = device_get_ivars(child);
2920 pcicfgregs *cfg = &dinfo->cfg;
2921
2922 return (PCIB_READ_CONFIG(device_get_parent(dev),
2923 cfg->bus, cfg->slot, cfg->func, reg, width));
2924 }
2925
2926 void
2927 pci_write_config_method(device_t dev, device_t child, int reg,
2928 uint32_t val, int width)
2929 {
2930 struct pci_devinfo *dinfo = device_get_ivars(child);
2931 pcicfgregs *cfg = &dinfo->cfg;
2932
2933 PCIB_WRITE_CONFIG(device_get_parent(dev),
2934 cfg->bus, cfg->slot, cfg->func, reg, val, width);
2935 }
2936
2937 int
2938 pci_child_location_str_method(device_t dev, device_t child, char *buf,
2939 size_t buflen)
2940 {
2941
2942 snprintf(buf, buflen, "slot=%d function=%d", pci_get_slot(child),
2943 pci_get_function(child));
2944 return (0);
2945 }
2946
2947 int
2948 pci_child_pnpinfo_str_method(device_t dev, device_t child, char *buf,
2949 size_t buflen)
2950 {
2951 struct pci_devinfo *dinfo;
2952 pcicfgregs *cfg;
2953
2954 dinfo = device_get_ivars(child);
2955 cfg = &dinfo->cfg;
2956 snprintf(buf, buflen, "vendor=0x%04x device=0x%04x subvendor=0x%04x "
2957 "subdevice=0x%04x class=0x%02x%02x%02x", cfg->vendor, cfg->device,
2958 cfg->subvendor, cfg->subdevice, cfg->baseclass, cfg->subclass,
2959 cfg->progif);
2960 return (0);
2961 }
2962
2963 int
2964 pci_assign_interrupt_method(device_t dev, device_t child)
2965 {
2966 struct pci_devinfo *dinfo = device_get_ivars(child);
2967 pcicfgregs *cfg = &dinfo->cfg;
2968
2969 return (PCIB_ROUTE_INTERRUPT(device_get_parent(dev), child,
2970 cfg->intpin));
2971 }
2972
2973 static int
2974 pci_modevent(module_t mod, int what, void *arg)
2975 {
2976 static struct cdev *pci_cdev;
2977
2978 switch (what) {
2979 case MOD_LOAD:
2980 STAILQ_INIT(&pci_devq);
2981 pci_generation = 0;
2982 pci_cdev = make_dev(&pcicdev, 0, UID_ROOT, GID_WHEEL, 0644,
2983 "pci");
2984 pci_load_vendor_data();
2985 break;
2986
2987 case MOD_UNLOAD:
2988 destroy_dev(pci_cdev);
2989 break;
2990 }
2991
2992 return (0);
2993 }
2994
/*
 * Restore the config-space registers saved by pci_cfg_save(), bringing
 * the device back to D0 first so the writes stick.  Only type 0 headers
 * are handled.  Counterpart of pci_cfg_save().
 */
void
pci_cfg_restore(device_t dev, struct pci_devinfo *dinfo)
{
	int i;

	/*
	 * Only do header type 0 devices.  Type 1 devices are bridges,
	 * which we know need special treatment.  Type 2 devices are
	 * cardbus bridges which also require special treatment.
	 * Other types are unknown, and we err on the side of safety
	 * by ignoring them.
	 */
	if (dinfo->cfg.hdrtype != 0)
		return;

	/*
	 * Restore the device to full power mode.  We must do this
	 * before we restore the registers because moving from D3 to
	 * D0 will cause the chip's BARs and some other registers to
	 * be reset to some unknown power on reset values.  Cut down
	 * the noise on boot by doing nothing if we are already in
	 * state D0.
	 */
	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
		pci_set_powerstate(dev, PCI_POWERSTATE_D0);
	}
	/* BARs first, then the rest of the writable type 0 header. */
	for (i = 0; i < dinfo->cfg.nummaps; i++)
		pci_write_config(dev, PCIR_BAR(i), dinfo->cfg.bar[i], 4);
	pci_write_config(dev, PCIR_BIOS, dinfo->cfg.bios, 4);
	pci_write_config(dev, PCIR_COMMAND, dinfo->cfg.cmdreg, 2);
	pci_write_config(dev, PCIR_INTLINE, dinfo->cfg.intline, 1);
	pci_write_config(dev, PCIR_INTPIN, dinfo->cfg.intpin, 1);
	pci_write_config(dev, PCIR_MINGNT, dinfo->cfg.mingnt, 1);
	pci_write_config(dev, PCIR_MAXLAT, dinfo->cfg.maxlat, 1);
	pci_write_config(dev, PCIR_CACHELNSZ, dinfo->cfg.cachelnsz, 1);
	pci_write_config(dev, PCIR_LATTIMER, dinfo->cfg.lattimer, 1);
	pci_write_config(dev, PCIR_PROGIF, dinfo->cfg.progif, 1);
	pci_write_config(dev, PCIR_REVID, dinfo->cfg.revid, 1);

	/* Restore MSI and MSI-X configurations if they are present. */
	if (dinfo->cfg.msi.msi_location != 0)
		pci_resume_msi(dev);
	if (dinfo->cfg.msix.msix_location != 0)
		pci_resume_msix(dev);
}
3040
/*
 * Snapshot the writable part of a type 0 device's config header into
 * the devinfo cache (so pci_cfg_restore() can reinstate it) and, when
 * setstate is nonzero, optionally power the device down to D3 according
 * to the pci_do_power_nodriver policy (0 = never, 1 = not storage,
 * 2 = not display/memory/base-peripheral either, 3 = everything).
 */
void
pci_cfg_save(device_t dev, struct pci_devinfo *dinfo, int setstate)
{
	int i;
	uint32_t cls;
	int ps;

	/*
	 * Only do header type 0 devices.  Type 1 devices are bridges, which
	 * we know need special treatment.  Type 2 devices are cardbus bridges
	 * which also require special treatment.  Other types are unknown, and
	 * we err on the side of safety by ignoring them.  Powering down
	 * bridges should not be undertaken lightly.
	 */
	if (dinfo->cfg.hdrtype != 0)
		return;
	for (i = 0; i < dinfo->cfg.nummaps; i++)
		dinfo->cfg.bar[i] = pci_read_config(dev, PCIR_BAR(i), 4);
	dinfo->cfg.bios = pci_read_config(dev, PCIR_BIOS, 4);

	/*
	 * Some drivers apparently write to these registers w/o updating our
	 * cached copy.  No harm happens if we update the copy, so do so here
	 * so we can restore them.  The COMMAND register is modified by the
	 * bus w/o updating the cache.  This should represent the normally
	 * writable portion of the 'defined' part of type 0 headers.  In
	 * theory we also need to save/restore the PCI capability structures
	 * we know about, but apart from power we don't know any that are
	 * writable.
	 */
	dinfo->cfg.subvendor = pci_read_config(dev, PCIR_SUBVEND_0, 2);
	dinfo->cfg.subdevice = pci_read_config(dev, PCIR_SUBDEV_0, 2);
	dinfo->cfg.vendor = pci_read_config(dev, PCIR_VENDOR, 2);
	dinfo->cfg.device = pci_read_config(dev, PCIR_DEVICE, 2);
	dinfo->cfg.cmdreg = pci_read_config(dev, PCIR_COMMAND, 2);
	dinfo->cfg.intline = pci_read_config(dev, PCIR_INTLINE, 1);
	dinfo->cfg.intpin = pci_read_config(dev, PCIR_INTPIN, 1);
	dinfo->cfg.mingnt = pci_read_config(dev, PCIR_MINGNT, 1);
	dinfo->cfg.maxlat = pci_read_config(dev, PCIR_MAXLAT, 1);
	dinfo->cfg.cachelnsz = pci_read_config(dev, PCIR_CACHELNSZ, 1);
	dinfo->cfg.lattimer = pci_read_config(dev, PCIR_LATTIMER, 1);
	dinfo->cfg.baseclass = pci_read_config(dev, PCIR_CLASS, 1);
	dinfo->cfg.subclass = pci_read_config(dev, PCIR_SUBCLASS, 1);
	dinfo->cfg.progif = pci_read_config(dev, PCIR_PROGIF, 1);
	dinfo->cfg.revid = pci_read_config(dev, PCIR_REVID, 1);

	/*
	 * Don't set the state for display devices, base peripherals and
	 * memory devices since bad things happen when they are powered down.
	 * We should (a) have drivers that can easily detach and (b) use
	 * generic drivers for these devices so that some device actually
	 * attaches.  We need to make sure that when we implement (a) we don't
	 * power the device down on a reattach.
	 */
	cls = pci_get_class(dev);
	if (!setstate)
		return;
	switch (pci_do_power_nodriver)
	{
	case 0:		/* NO powerdown at all */
		return;
	case 1:		/* Conservative about what to power down */
		if (cls == PCIC_STORAGE)
			return;
		/*FALLTHROUGH*/
	case 2:		/* Aggressive about what to power down */
		if (cls == PCIC_DISPLAY || cls == PCIC_MEMORY ||
		    cls == PCIC_BASEPERIPH)
			return;
		/*FALLTHROUGH*/
	case 3:		/* Power down everything */
		break;
	}
	/*
	 * PCI spec says we can only go into D3 state from D0 state.
	 * Transition from D[12] into D0 before going to D3 state.
	 */
	ps = pci_get_powerstate(dev);
	if (ps != PCI_POWERSTATE_D0 && ps != PCI_POWERSTATE_D3)
		pci_set_powerstate(dev, PCI_POWERSTATE_D0);
	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D3)
		pci_set_powerstate(dev, PCI_POWERSTATE_D3);
}
Cache object: 9ca13616d9dfa803c7c3f53e3bb7f8ac
|