1 /*-
2 * Copyright (c) 2010 Isilon Systems, Inc.
3 * Copyright (c) 2010 iX Systems, Inc.
4 * Copyright (c) 2010 Panasas, Inc.
5 * Copyright (c) 2013-2016 Mellanox Technologies, Ltd.
6 * All rights reserved.
7 * Copyright (c) 2020-2022 The FreeBSD Foundation
8 *
9 * Portions of this software were developed by Björn Zeeb
10 * under sponsorship from the FreeBSD Foundation.
11 *
12 * Redistribution and use in source and binary forms, with or without
13 * modification, are permitted provided that the following conditions
14 * are met:
15 * 1. Redistributions of source code must retain the above copyright
16 * notice unmodified, this list of conditions, and the following
17 * disclaimer.
18 * 2. Redistributions in binary form must reproduce the above copyright
19 * notice, this list of conditions and the following disclaimer in the
20 * documentation and/or other materials provided with the distribution.
21 *
22 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
23 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
24 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
25 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
26 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
27 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
31 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 *
33 * $FreeBSD$
34 */
35 #ifndef _LINUXKPI_LINUX_PCI_H_
36 #define _LINUXKPI_LINUX_PCI_H_
37
38 #define CONFIG_PCI_MSI
39
40 #include <linux/types.h>
41
42 #include <sys/param.h>
43 #include <sys/bus.h>
44 #include <sys/module.h>
45 #include <sys/nv.h>
46 #include <sys/pciio.h>
47 #include <sys/rman.h>
48 #include <dev/pci/pcivar.h>
49 #include <dev/pci/pcireg.h>
50 #include <dev/pci/pci_private.h>
51
52 #include <machine/resource.h>
53
54 #include <linux/list.h>
55 #include <linux/dmapool.h>
56 #include <linux/dma-mapping.h>
57 #include <linux/compiler.h>
58 #include <linux/errno.h>
59 #include <asm/atomic.h>
60 #include <linux/device.h>
61 #include <linux/pci_ids.h>
62 #include <linux/pm.h>
63
64 struct pci_device_id {
65 uint32_t vendor;
66 uint32_t device;
67 uint32_t subvendor;
68 uint32_t subdevice;
69 uint32_t class;
70 uint32_t class_mask;
71 uintptr_t driver_data;
72 };
73
74 /* Linux has an empty element at the end of the ID table -> nitems() - 1. */
75 #define MODULE_DEVICE_TABLE(_bus, _table) \
76 \
77 static device_method_t _ ## _bus ## _ ## _table ## _methods[] = { \
78 DEVMETHOD_END \
79 }; \
80 \
81 static driver_t _ ## _bus ## _ ## _table ## _driver = { \
82 "lkpi_" #_bus #_table, \
83 _ ## _bus ## _ ## _table ## _methods, \
84 0 \
85 }; \
86 \
87 DRIVER_MODULE(lkpi_ ## _table, pci, _ ## _bus ## _ ## _table ## _driver,\
88 0, 0); \
89 \
90 MODULE_PNP_INFO("U32:vendor;U32:device;V32:subvendor;V32:subdevice", \
91 _bus, lkpi_ ## _table, _table, nitems(_table) - 1)
92
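/*
 * Example (a sketch, not from any in-tree driver): a hypothetical "foo"
 * driver would terminate its ID table with an all-zero sentinel entry,
 * which is the Linux convention the nitems() - 1 above relies on:
 *
 *	static const struct pci_device_id foo_id_table[] = {
 *		{ PCI_VDEVICE(INTEL, 0x1234) },
 *		{ 0, }
 *	};
 *	MODULE_DEVICE_TABLE(pci, foo_id_table);
 */
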
93 #define PCI_ANY_ID -1U
94
95 #define PCI_DEVFN(slot, func) ((((slot) & 0x1f) << 3) | ((func) & 0x07))
96 #define PCI_SLOT(devfn) (((devfn) >> 3) & 0x1f)
97 #define PCI_FUNC(devfn) ((devfn) & 0x07)
98 #define PCI_BUS_NUM(devfn) (((devfn) >> 8) & 0xff)
99
100 #define PCI_VDEVICE(_vendor, _device) \
101 .vendor = PCI_VENDOR_ID_##_vendor, .device = (_device), \
102 .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
103 #define PCI_DEVICE(_vendor, _device) \
104 .vendor = (_vendor), .device = (_device), \
105 .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
106
107 #define to_pci_dev(n) container_of(n, struct pci_dev, dev)
108
109 #define PCI_STD_NUM_BARS 6
110 #define PCI_VENDOR_ID PCIR_VENDOR
111 #define PCI_DEVICE_ID PCIR_DEVICE
112 #define PCI_COMMAND PCIR_COMMAND
113 #define PCI_COMMAND_INTX_DISABLE PCIM_CMD_INTxDIS
114 #define PCI_EXP_DEVCTL PCIER_DEVICE_CTL /* Device Control */
115 #define PCI_EXP_LNKCTL PCIER_LINK_CTL /* Link Control */
116 #define PCI_EXP_LNKCTL_ASPM_L0S PCIEM_LINK_CTL_ASPMC_L0S
117 #define PCI_EXP_LNKCTL_ASPM_L1 PCIEM_LINK_CTL_ASPMC_L1
118 #define PCI_EXP_LNKCTL_ASPMC PCIEM_LINK_CTL_ASPMC
119 #define PCI_EXP_LNKCTL_CLKREQ_EN PCIEM_LINK_CTL_ECPM /* Enable clock PM */
120 #define PCI_EXP_LNKCTL_HAWD PCIEM_LINK_CTL_HAWD
121 #define PCI_EXP_FLAGS_TYPE PCIEM_FLAGS_TYPE /* Device/Port type */
122 #define PCI_EXP_DEVCAP PCIER_DEVICE_CAP /* Device capabilities */
123 #define PCI_EXP_DEVSTA PCIER_DEVICE_STA /* Device Status */
124 #define PCI_EXP_LNKCAP PCIER_LINK_CAP /* Link Capabilities */
125 #define PCI_EXP_LNKSTA PCIER_LINK_STA /* Link Status */
126 #define PCI_EXP_SLTCAP PCIER_SLOT_CAP /* Slot Capabilities */
127 #define PCI_EXP_SLTCTL PCIER_SLOT_CTL /* Slot Control */
128 #define PCI_EXP_SLTSTA PCIER_SLOT_STA /* Slot Status */
129 #define PCI_EXP_RTCTL PCIER_ROOT_CTL /* Root Control */
130 #define PCI_EXP_RTCAP PCIER_ROOT_CAP /* Root Capabilities */
131 #define PCI_EXP_RTSTA PCIER_ROOT_STA /* Root Status */
132 #define PCI_EXP_DEVCAP2 PCIER_DEVICE_CAP2 /* Device Capabilities 2 */
133 #define PCI_EXP_DEVCTL2 PCIER_DEVICE_CTL2 /* Device Control 2 */
134 #define PCI_EXP_DEVCTL2_LTR_EN PCIEM_CTL2_LTR_ENABLE
135 #define PCI_EXP_DEVCTL2_COMP_TMOUT_DIS PCIEM_CTL2_COMP_TIMO_DISABLE
136 #define PCI_EXP_LNKCAP2 PCIER_LINK_CAP2 /* Link Capabilities 2 */
137 #define PCI_EXP_LNKCTL2 PCIER_LINK_CTL2 /* Link Control 2 */
138 #define PCI_EXP_LNKSTA2 PCIER_LINK_STA2 /* Link Status 2 */
139 #define PCI_EXP_FLAGS PCIER_FLAGS /* Capabilities register */
140 #define PCI_EXP_FLAGS_VERS PCIEM_FLAGS_VERSION /* Capability version */
141 #define PCI_EXP_TYPE_ROOT_PORT PCIEM_TYPE_ROOT_PORT /* Root Port */
142 #define PCI_EXP_TYPE_ENDPOINT PCIEM_TYPE_ENDPOINT /* Express Endpoint */
143 #define PCI_EXP_TYPE_LEG_END PCIEM_TYPE_LEGACY_ENDPOINT /* Legacy Endpoint */
144 #define PCI_EXP_TYPE_DOWNSTREAM PCIEM_TYPE_DOWNSTREAM_PORT /* Downstream Port */
145 #define PCI_EXP_FLAGS_SLOT PCIEM_FLAGS_SLOT /* Slot implemented */
146 #define PCI_EXP_TYPE_RC_EC PCIEM_TYPE_ROOT_EC /* Root Complex Event Collector */
147 #define PCI_EXP_LNKCAP_SLS_2_5GB 0x01 /* Supported Link Speed 2.5GT/s */
148 #define PCI_EXP_LNKCAP_SLS_5_0GB 0x02 /* Supported Link Speed 5.0GT/s */
149 #define PCI_EXP_LNKCAP_SLS_8_0GB 0x03 /* Supported Link Speed 8.0GT/s */
150 #define PCI_EXP_LNKCAP_SLS_16_0GB 0x04 /* Supported Link Speed 16.0GT/s */
151 #define PCI_EXP_LNKCAP_SLS_32_0GB 0x05 /* Supported Link Speed 32.0GT/s */
152 #define PCI_EXP_LNKCAP_SLS_64_0GB 0x06 /* Supported Link Speed 64.0GT/s */
153 #define PCI_EXP_LNKCAP_MLW 0x03f0 /* Maximum Link Width */
154 #define PCI_EXP_LNKCAP2_SLS_2_5GB 0x02 /* Supported Link Speed 2.5GT/s */
155 #define PCI_EXP_LNKCAP2_SLS_5_0GB 0x04 /* Supported Link Speed 5.0GT/s */
156 #define PCI_EXP_LNKCAP2_SLS_8_0GB 0x08 /* Supported Link Speed 8.0GT/s */
157 #define PCI_EXP_LNKCAP2_SLS_16_0GB 0x10 /* Supported Link Speed 16.0GT/s */
158 #define PCI_EXP_LNKCAP2_SLS_32_0GB 0x20 /* Supported Link Speed 32.0GT/s */
159 #define PCI_EXP_LNKCAP2_SLS_64_0GB 0x40 /* Supported Link Speed 64.0GT/s */
160 #define PCI_EXP_LNKCTL2_TLS 0x000f
161 #define PCI_EXP_LNKCTL2_TLS_2_5GT 0x0001 /* Supported Speed 2.5GT/s */
162 #define PCI_EXP_LNKCTL2_TLS_5_0GT 0x0002 /* Supported Speed 5GT/s */
163 #define PCI_EXP_LNKCTL2_TLS_8_0GT 0x0003 /* Supported Speed 8GT/s */
164 #define PCI_EXP_LNKCTL2_TLS_16_0GT 0x0004 /* Supported Speed 16GT/s */
165 #define PCI_EXP_LNKCTL2_TLS_32_0GT 0x0005 /* Supported Speed 32GT/s */
166 #define PCI_EXP_LNKCTL2_TLS_64_0GT 0x0006 /* Supported Speed 64GT/s */
167 #define PCI_EXP_LNKCTL2_ENTER_COMP 0x0010 /* Enter Compliance */
168 #define PCI_EXP_LNKCTL2_TX_MARGIN 0x0380 /* Transmit Margin */
169
170 #define PCI_MSI_ADDRESS_LO PCIR_MSI_ADDR
171 #define PCI_MSI_ADDRESS_HI PCIR_MSI_ADDR_HIGH
172 #define PCI_MSI_FLAGS PCIR_MSI_CTRL
173 #define PCI_MSI_FLAGS_ENABLE PCIM_MSICTRL_MSI_ENABLE
174 #define PCI_MSIX_FLAGS PCIR_MSIX_CTRL
175 #define PCI_MSIX_FLAGS_ENABLE PCIM_MSIXCTRL_MSIX_ENABLE
176
177 #define PCI_EXP_LNKCAP_CLKPM 0x00040000
178 #define PCI_EXP_DEVSTA_TRPND 0x0020
179
180 #define IORESOURCE_MEM (1 << SYS_RES_MEMORY)
181 #define IORESOURCE_IO (1 << SYS_RES_IOPORT)
182 #define IORESOURCE_IRQ (1 << SYS_RES_IRQ)
183
184 enum pci_bus_speed {
185 PCI_SPEED_UNKNOWN = -1,
186 PCIE_SPEED_2_5GT,
187 PCIE_SPEED_5_0GT,
188 PCIE_SPEED_8_0GT,
189 PCIE_SPEED_16_0GT,
190 PCIE_SPEED_32_0GT,
191 PCIE_SPEED_64_0GT,
192 };
193
194 enum pcie_link_width {
195 PCIE_LNK_WIDTH_RESRV = 0x00,
196 PCIE_LNK_X1 = 0x01,
197 PCIE_LNK_X2 = 0x02,
198 PCIE_LNK_X4 = 0x04,
199 PCIE_LNK_X8 = 0x08,
200 PCIE_LNK_X12 = 0x0c,
201 PCIE_LNK_X16 = 0x10,
202 PCIE_LNK_X32 = 0x20,
203 PCIE_LNK_WIDTH_UNKNOWN = 0xff,
204 };
205
206 #define PCIE_LINK_STATE_L0S 0x00000001
207 #define PCIE_LINK_STATE_L1 0x00000002
208 #define PCIE_LINK_STATE_CLKPM 0x00000004
209
210 typedef int pci_power_t;
211
212 #define PCI_D0 PCI_POWERSTATE_D0
213 #define PCI_D1 PCI_POWERSTATE_D1
214 #define PCI_D2 PCI_POWERSTATE_D2
215 #define PCI_D3hot PCI_POWERSTATE_D3
216 #define PCI_D3cold 4
217
218 #define PCI_POWER_ERROR PCI_POWERSTATE_UNKNOWN
219
220 extern const char *pci_power_names[6];
221
222 #define PCI_ERR_ROOT_COMMAND PCIR_AER_ROOTERR_CMD
223 #define PCI_ERR_ROOT_ERR_SRC PCIR_AER_COR_SOURCE_ID
224
225 #define PCI_EXT_CAP_ID_ERR PCIZ_AER
226 #define PCI_EXT_CAP_ID_L1SS PCIZ_L1PM
227
228 #define PCI_L1SS_CTL1 0x8
229 #define PCI_L1SS_CTL1_L1SS_MASK 0xf
230
231 #define PCI_IRQ_LEGACY 0x01
232 #define PCI_IRQ_MSI 0x02
233 #define PCI_IRQ_MSIX 0x04
234 #define PCI_IRQ_ALL_TYPES (PCI_IRQ_MSIX|PCI_IRQ_MSI|PCI_IRQ_LEGACY)
235
236 struct pci_dev;
237
238 struct pci_driver {
239 struct list_head node;
240 char *name;
241 const struct pci_device_id *id_table;
242 int (*probe)(struct pci_dev *dev, const struct pci_device_id *id);
243 void (*remove)(struct pci_dev *dev);
244 int (*suspend) (struct pci_dev *dev, pm_message_t state); /* Device suspended */
245 int (*resume) (struct pci_dev *dev); /* Device woken up */
246 void (*shutdown) (struct pci_dev *dev); /* Device shutdown */
247 driver_t bsddriver;
248 devclass_t bsdclass;
249 struct device_driver driver;
250 const struct pci_error_handlers *err_handler;
251 bool isdrm;
252 int bsd_probe_return;
253 int (*bsd_iov_init)(device_t dev, uint16_t num_vfs,
254 const nvlist_t *pf_config);
255 void (*bsd_iov_uninit)(device_t dev);
256 int (*bsd_iov_add_vf)(device_t dev, uint16_t vfnum,
257 const nvlist_t *vf_config);
258 };
259
260 struct pci_bus {
261 struct pci_dev *self;
262 int domain;
263 int number;
264 };
265
266 extern struct list_head pci_drivers;
267 extern struct list_head pci_devices;
268 extern spinlock_t pci_lock;
269
270 #define __devexit_p(x) x
271
272 #define module_pci_driver(_driver) \
273 \
274 static inline int \
275 _pci_init(void) \
276 { \
277 \
278 return (linux_pci_register_driver(&_driver)); \
279 } \
280 \
281 static inline void \
282 _pci_exit(void) \
283 { \
284 \
285 linux_pci_unregister_driver(&_driver); \
286 } \
287 \
288 module_init(_pci_init); \
289 module_exit(_pci_exit)
290
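/*
 * Example (sketch): the same hypothetical "foo" driver pairs its ID table
 * with a struct pci_driver and lets module_pci_driver() emit the module
 * init/exit glue:
 *
 *	static struct pci_driver foo_pci_driver = {
 *		.name = "foo",
 *		.id_table = foo_id_table,
 *		.probe = foo_probe,
 *		.remove = foo_remove,
 *	};
 *	module_pci_driver(foo_pci_driver);
 */
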
291 struct msi_msg {
292 uint32_t data;
293 };
294
295 struct msi_desc {
296 struct msi_msg msg;
297 struct {
298 bool is_64;
299 } msi_attrib;
300 };
301
302 /*
303 * If we find drivers accessing this from multiple KPIs we may have to
304 * refcount objects of this structure.
305 */
306 struct pci_mmio_region {
307 TAILQ_ENTRY(pci_mmio_region) next;
308 struct resource *res;
309 int rid;
310 int type;
311 };
312
313 struct pci_dev {
314 struct device dev;
315 struct list_head links;
316 struct pci_driver *pdrv;
317 struct pci_bus *bus;
318 struct pci_dev *root;
319 pci_power_t current_state;
320 uint16_t device;
321 uint16_t vendor;
322 uint16_t subsystem_vendor;
323 uint16_t subsystem_device;
324 unsigned int irq;
325 unsigned int devfn;
326 uint32_t class;
327 uint8_t revision;
328 uint8_t msi_cap;
329 uint8_t msix_cap;
330 bool managed; /* devres "pcim_*()". */
331 bool want_iomap_res;
332 bool msi_enabled;
333 bool msix_enabled;
334 phys_addr_t rom;
335 size_t romlen;
336 /*
337 * msi_desc should probably become an array one day. For as long as we
338 * only support 1 MSI vector this is fine.
339 */
340 struct msi_desc *msi_desc;
341
342 TAILQ_HEAD(, pci_mmio_region) mmio;
343 };
344
345 /* We need some meta-struct to keep track of these for devres. */
346 struct pci_devres {
347 bool enable_io;
348 /* PCIR_MAX_BAR_0 + 1 = 6 => BIT(0..5). */
349 uint8_t region_mask;
350 struct resource *region_table[PCIR_MAX_BAR_0 + 1]; /* Not needed. */
351 };
352 struct pcim_iomap_devres {
353 void *mmio_table[PCIR_MAX_BAR_0 + 1];
354 struct resource *res_table[PCIR_MAX_BAR_0 + 1];
355 };
356
357 int pci_request_region(struct pci_dev *pdev, int bar, const char *res_name);
358 int pci_alloc_irq_vectors(struct pci_dev *pdev, int minv, int maxv,
359 unsigned int flags);
360 bool pci_device_is_present(struct pci_dev *pdev);
361
362 /* Internal helper function(s). */
363 struct pci_dev *lkpinew_pci_dev(device_t);
364 struct pci_devres *lkpi_pci_devres_get_alloc(struct pci_dev *pdev);
365 void lkpi_pci_devres_release(struct device *, void *);
366 struct resource *_lkpi_pci_iomap(struct pci_dev *pdev, int bar, int mmio_size);
367 struct pcim_iomap_devres *lkpi_pcim_iomap_devres_find(struct pci_dev *pdev);
368 void lkpi_pcim_iomap_table_release(struct device *, void *);
369 struct pci_dev *lkpi_pci_get_device(uint16_t, uint16_t, struct pci_dev *);
370 struct msi_desc *lkpi_pci_msi_desc_alloc(int);
371
372 static inline bool
373 dev_is_pci(struct device *dev)
374 {
375
376 return (device_get_devclass(dev->bsddev) == devclass_find("pci"));
377 }
378
379 static inline int
380 pci_resource_type(struct pci_dev *pdev, int bar)
381 {
382 struct pci_map *pm;
383
384 pm = pci_find_bar(pdev->dev.bsddev, PCIR_BAR(bar));
385 if (!pm)
386 return (-1);
387
388 if (PCI_BAR_IO(pm->pm_value))
389 return (SYS_RES_IOPORT);
390 else
391 return (SYS_RES_MEMORY);
392 }
393
394 struct resource_list_entry *linux_pci_reserve_bar(struct pci_dev *pdev,
395 struct resource_list *rl, int type, int rid);
396
397 static inline struct resource_list_entry *
398 linux_pci_get_rle(struct pci_dev *pdev, int type, int rid, bool reserve_bar)
399 {
400 struct pci_devinfo *dinfo;
401 struct resource_list *rl;
402 struct resource_list_entry *rle;
403
404 dinfo = device_get_ivars(pdev->dev.bsddev);
405 rl = &dinfo->resources;
406 rle = resource_list_find(rl, type, rid);
407 /* Reserve resources for this BAR if needed. */
408 if (rle == NULL && reserve_bar)
409 rle = linux_pci_reserve_bar(pdev, rl, type, rid);
410 return (rle);
411 }
412
413 static inline struct resource_list_entry *
414 linux_pci_get_bar(struct pci_dev *pdev, int bar, bool reserve)
415 {
416 int type;
417
418 type = pci_resource_type(pdev, bar);
419 if (type < 0)
420 return (NULL);
421 bar = PCIR_BAR(bar);
422 return (linux_pci_get_rle(pdev, type, bar, reserve));
423 }
424
425 static inline struct device *
426 linux_pci_find_irq_dev(unsigned int irq)
427 {
428 struct pci_dev *pdev;
429 struct device *found;
430
431 found = NULL;
432 spin_lock(&pci_lock);
433 list_for_each_entry(pdev, &pci_devices, links) {
434 if (irq == pdev->dev.irq ||
435 (irq >= pdev->dev.irq_start && irq < pdev->dev.irq_end)) {
436 found = &pdev->dev;
437 break;
438 }
439 }
440 spin_unlock(&pci_lock);
441 return (found);
442 }
443
444 /*
445 * All drivers just seem to want to inspect the type, not the flags.
446 */
447 static inline int
448 pci_resource_flags(struct pci_dev *pdev, int bar)
449 {
450 int type;
451
452 type = pci_resource_type(pdev, bar);
453 if (type < 0)
454 return (0);
455 return (1 << type);
456 }
457
458 static inline const char *
459 pci_name(struct pci_dev *d)
460 {
461
462 return device_get_desc(d->dev.bsddev);
463 }
464
465 static inline void *
466 pci_get_drvdata(struct pci_dev *pdev)
467 {
468
469 return dev_get_drvdata(&pdev->dev);
470 }
471
472 static inline void
473 pci_set_drvdata(struct pci_dev *pdev, void *data)
474 {
475
476 dev_set_drvdata(&pdev->dev, data);
477 }
478
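/*
 * Example (sketch): "sc" stands for a hypothetical driver's per-device
 * softc. It is typically stored in probe:
 *
 *	pci_set_drvdata(pdev, sc);
 *
 * and retrieved again later, e.g. in remove:
 *
 *	sc = pci_get_drvdata(pdev);
 */
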
479 static inline struct pci_dev *
480 pci_dev_get(struct pci_dev *pdev)
481 {
482
483 if (pdev != NULL)
484 get_device(&pdev->dev);
485 return (pdev);
486 }
487
488 static __inline void
489 pci_dev_put(struct pci_dev *pdev)
490 {
491
492 if (pdev != NULL)
493 put_device(&pdev->dev);
494 }
495
496 static inline int
497 pci_enable_device(struct pci_dev *pdev)
498 {
499
500 pci_enable_io(pdev->dev.bsddev, SYS_RES_IOPORT);
501 pci_enable_io(pdev->dev.bsddev, SYS_RES_MEMORY);
502 return (0);
503 }
504
505 static inline void
506 pci_disable_device(struct pci_dev *pdev)
507 {
508
509 pci_disable_busmaster(pdev->dev.bsddev);
510 }
511
512 static inline int
513 pci_set_master(struct pci_dev *pdev)
514 {
515
516 pci_enable_busmaster(pdev->dev.bsddev);
517 return (0);
518 }
519
520 static inline int
521 pci_set_power_state(struct pci_dev *pdev, int state)
522 {
523
524 pci_set_powerstate(pdev->dev.bsddev, state);
525 return (0);
526 }
527
528 static inline int
529 pci_clear_master(struct pci_dev *pdev)
530 {
531
532 pci_disable_busmaster(pdev->dev.bsddev);
533 return (0);
534 }
535
536 static inline bool
537 pci_is_root_bus(struct pci_bus *pbus)
538 {
539
540 return (pbus->self == NULL);
541 }
542
543 static inline struct pci_dev *
544 pci_upstream_bridge(struct pci_dev *pdev)
545 {
546
547 if (pci_is_root_bus(pdev->bus))
548 return (NULL);
549
550 /*
551 * If we do not have a (proper) "upstream bridge" set, e.g., we point
552 * to ourselves, try to handle this case on the fly like we do
553 * for pcie_find_root_port().
554 */
555 if (pdev == pdev->bus->self) {
556 device_t bridge;
557
558 bridge = device_get_parent(pdev->dev.bsddev);
559 if (bridge == NULL)
560 goto done;
561 bridge = device_get_parent(bridge);
562 if (bridge == NULL)
563 goto done;
564 if (device_get_devclass(device_get_parent(bridge)) !=
565 devclass_find("pci"))
566 goto done;
567
568 /*
569 * "bridge" is a PCI-to-PCI bridge. Create a Linux pci_dev
570 * for it so it can be returned.
571 */
572 pdev->bus->self = lkpinew_pci_dev(bridge);
573 }
574 done:
575 return (pdev->bus->self);
576 }
577
578 static inline struct pci_devres *
579 lkpi_pci_devres_find(struct pci_dev *pdev)
580 {
581
582 if (!pdev->managed)
583 return (NULL);
584
585 return (lkpi_pci_devres_get_alloc(pdev));
586 }
587
588 static inline void
589 pci_release_region(struct pci_dev *pdev, int bar)
590 {
591 struct resource_list_entry *rle;
592 struct pci_devres *dr;
593 struct pci_mmio_region *mmio, *p;
594
595 if ((rle = linux_pci_get_bar(pdev, bar, false)) == NULL)
596 return;
597
598 /*
599 * As we implicitly track the requests we also need to clear them on
600 * release. Clear them before releasing the resource.
601 */
602 dr = lkpi_pci_devres_find(pdev);
603 if (dr != NULL) {
604 KASSERT(dr->region_table[bar] == rle->res, ("%s: pdev %p bar %d"
605 " region_table res %p != rel->res %p\n", __func__, pdev,
606 bar, dr->region_table[bar], rle->res));
607 dr->region_table[bar] = NULL;
608 dr->region_mask &= ~(1 << bar);
609 }
610
611 TAILQ_FOREACH_SAFE(mmio, &pdev->mmio, next, p) {
612 if (rle->res != (void *)rman_get_bushandle(mmio->res))
613 continue;
614 TAILQ_REMOVE(&pdev->mmio, mmio, next);
615 free(mmio, M_DEVBUF);
616 }
617
618 bus_release_resource(pdev->dev.bsddev, rle->type, rle->rid, rle->res);
619 }
620
621 static inline void
622 pci_release_regions(struct pci_dev *pdev)
623 {
624 int i;
625
626 for (i = 0; i <= PCIR_MAX_BAR_0; i++)
627 pci_release_region(pdev, i);
628 }
629
630 static inline int
631 pci_request_regions(struct pci_dev *pdev, const char *res_name)
632 {
633 int error;
634 int i;
635
636 for (i = 0; i <= PCIR_MAX_BAR_0; i++) {
637 error = pci_request_region(pdev, i, res_name);
638 if (error && error != -ENODEV) {
639 pci_release_regions(pdev);
640 return (error);
641 }
642 }
643 return (0);
644 }
645
646 static inline void
647 lkpi_pci_disable_msix(struct pci_dev *pdev)
648 {
649
650 pci_release_msi(pdev->dev.bsddev);
651
652 /*
653 * The MSI-X IRQ numbers associated with this PCI device are no
654 * longer valid and might be re-assigned. Make sure
655 * linux_pci_find_irq_dev() no longer sees them by resetting
656 * their references to zero:
657 */
658 pdev->dev.irq_start = 0;
659 pdev->dev.irq_end = 0;
660 pdev->msix_enabled = false;
661 }
662 /* Only for consistency. No conflict on that one. */
663 #define pci_disable_msix(pdev) lkpi_pci_disable_msix(pdev)
664
665 static inline void
666 lkpi_pci_disable_msi(struct pci_dev *pdev)
667 {
668
669 pci_release_msi(pdev->dev.bsddev);
670
671 pdev->dev.irq_start = 0;
672 pdev->dev.irq_end = 0;
673 pdev->irq = pdev->dev.irq;
674 pdev->msi_enabled = false;
675 }
676 #define pci_disable_msi(pdev) lkpi_pci_disable_msi(pdev)
677 #define pci_free_irq_vectors(pdev) lkpi_pci_disable_msi(pdev)
678
679 unsigned long pci_resource_start(struct pci_dev *pdev, int bar);
680 unsigned long pci_resource_len(struct pci_dev *pdev, int bar);
681
682 static inline bus_addr_t
683 pci_bus_address(struct pci_dev *pdev, int bar)
684 {
685
686 return (pci_resource_start(pdev, bar));
687 }
688
689 #define PCI_CAP_ID_EXP PCIY_EXPRESS
690 #define PCI_CAP_ID_PCIX PCIY_PCIX
691 #define PCI_CAP_ID_AGP PCIY_AGP
692 #define PCI_CAP_ID_PM PCIY_PMG
693
694 #define PCI_EXP_DEVCTL PCIER_DEVICE_CTL
695 #define PCI_EXP_DEVCTL_PAYLOAD PCIEM_CTL_MAX_PAYLOAD
696 #define PCI_EXP_DEVCTL_READRQ PCIEM_CTL_MAX_READ_REQUEST
697 #define PCI_EXP_LNKCTL PCIER_LINK_CTL
698 #define PCI_EXP_LNKSTA PCIER_LINK_STA
699
700 static inline int
701 pci_find_capability(struct pci_dev *pdev, int capid)
702 {
703 int reg;
704
705 if (pci_find_cap(pdev->dev.bsddev, capid, &reg))
706 return (0);
707 return (reg);
708 }
709
710 static inline int pci_pcie_cap(struct pci_dev *dev)
711 {
712 return pci_find_capability(dev, PCI_CAP_ID_EXP);
713 }
714
715 static inline int
716 pci_find_ext_capability(struct pci_dev *pdev, int capid)
717 {
718 int reg;
719
720 if (pci_find_extcap(pdev->dev.bsddev, capid, &reg))
721 return (0);
722 return (reg);
723 }
724
725 #define PCIM_PCAP_PME_SHIFT 11
726 static __inline bool
727 pci_pme_capable(struct pci_dev *pdev, uint32_t flag)
728 {
729 struct pci_devinfo *dinfo;
730 pcicfgregs *cfg;
731
732 if (flag > (PCIM_PCAP_D3PME_COLD >> PCIM_PCAP_PME_SHIFT))
733 return (false);
734
735 dinfo = device_get_ivars(pdev->dev.bsddev);
736 cfg = &dinfo->cfg;
737
738 if (cfg->pp.pp_cap == 0)
739 return (false);
740
741 if ((cfg->pp.pp_cap & (1 << (PCIM_PCAP_PME_SHIFT + flag))) != 0)
742 return (true);
743
744 return (false);
745 }
746
747 static inline int
748 pci_disable_link_state(struct pci_dev *pdev, uint32_t flags)
749 {
750
751 if (!pci_enable_aspm)
752 return (-EPERM);
753
754 return (-ENXIO);
755 }
756
757 static inline int
758 pci_read_config_byte(const struct pci_dev *pdev, int where, u8 *val)
759 {
760
761 *val = (u8)pci_read_config(pdev->dev.bsddev, where, 1);
762 return (0);
763 }
764
765 static inline int
766 pci_read_config_word(const struct pci_dev *pdev, int where, u16 *val)
767 {
768
769 *val = (u16)pci_read_config(pdev->dev.bsddev, where, 2);
770 return (0);
771 }
772
773 static inline int
774 pci_read_config_dword(const struct pci_dev *pdev, int where, u32 *val)
775 {
776
777 *val = (u32)pci_read_config(pdev->dev.bsddev, where, 4);
778 return (0);
779 }
780
781 static inline int
782 pci_write_config_byte(const struct pci_dev *pdev, int where, u8 val)
783 {
784
785 pci_write_config(pdev->dev.bsddev, where, val, 1);
786 return (0);
787 }
788
789 static inline int
790 pci_write_config_word(const struct pci_dev *pdev, int where, u16 val)
791 {
792
793 pci_write_config(pdev->dev.bsddev, where, val, 2);
794 return (0);
795 }
796
797 static inline int
798 pci_write_config_dword(const struct pci_dev *pdev, int where, u32 val)
799 {
800
801 pci_write_config(pdev->dev.bsddev, where, val, 4);
802 return (0);
803 }
804
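/*
 * Example (sketch): the accessors above always return 0, mirroring the
 * Linux calling convention, so a read-modify-write of the command
 * register looks like:
 *
 *	u16 cmd;
 *
 *	pci_read_config_word(pdev, PCI_COMMAND, &cmd);
 *	cmd &= ~PCI_COMMAND_INTX_DISABLE;
 *	pci_write_config_word(pdev, PCI_COMMAND, cmd);
 */
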
805 int linux_pci_register_driver(struct pci_driver *pdrv);
806 int linux_pci_register_drm_driver(struct pci_driver *pdrv);
807 void linux_pci_unregister_driver(struct pci_driver *pdrv);
808 void linux_pci_unregister_drm_driver(struct pci_driver *pdrv);
809
810 #define pci_register_driver(pdrv) linux_pci_register_driver(pdrv)
811 #define pci_unregister_driver(pdrv) linux_pci_unregister_driver(pdrv)
812
813 struct msix_entry {
814 int entry;
815 int vector;
816 };
817
818 /*
819 * Enable MSI-X; a positive return value indicates the actual number of
820 * available vectors (fewer than requested). Negative values are errors.
821 *
822 * NB: define added to prevent this definition of pci_enable_msix from
823 * clashing with the native FreeBSD version.
824 */
825 #define pci_enable_msix(...) \
826 linux_pci_enable_msix(__VA_ARGS__)
827
828 static inline int
829 pci_enable_msix(struct pci_dev *pdev, struct msix_entry *entries, int nreq)
830 {
831 struct resource_list_entry *rle;
832 int error;
833 int avail;
834 int i;
835
836 avail = pci_msix_count(pdev->dev.bsddev);
837 if (avail < nreq) {
838 if (avail == 0)
839 return -EINVAL;
840 return avail;
841 }
842 avail = nreq;
843 if ((error = -pci_alloc_msix(pdev->dev.bsddev, &avail)) != 0)
844 return error;
845 /*
846 * Handle the case where pci_alloc_msix() may allocate fewer
847 * interrupts than requested and return with no error:
848 */
849 if (avail < nreq) {
850 pci_release_msi(pdev->dev.bsddev);
851 return avail;
852 }
853 rle = linux_pci_get_rle(pdev, SYS_RES_IRQ, 1, false);
854 pdev->dev.irq_start = rle->start;
855 pdev->dev.irq_end = rle->start + avail;
856 for (i = 0; i < nreq; i++)
857 entries[i].vector = pdev->dev.irq_start + i;
858 pdev->msix_enabled = true;
859 return (0);
860 }
861
862 #define pci_enable_msix_range(...) \
863 linux_pci_enable_msix_range(__VA_ARGS__)
864
865 static inline int
866 pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries,
867 int minvec, int maxvec)
868 {
869 int nvec = maxvec;
870 int rc;
871
872 if (maxvec < minvec)
873 return (-ERANGE);
874
875 do {
876 rc = pci_enable_msix(dev, entries, nvec);
877 if (rc < 0) {
878 return (rc);
879 } else if (rc > 0) {
880 if (rc < minvec)
881 return (-ENOSPC);
882 nvec = rc;
883 }
884 } while (rc);
885 return (nvec);
886 }
887
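/*
 * Example (sketch): requesting between 1 and 8 MSI-X vectors for a
 * hypothetical device; on success the return value is the number of
 * vectors actually granted:
 *
 *	struct msix_entry entries[8];
 *	int i, nvec;
 *
 *	for (i = 0; i < 8; i++)
 *		entries[i].entry = i;
 *	nvec = pci_enable_msix_range(pdev, entries, 1, 8);
 *	if (nvec < 0)
 *		return (nvec);
 */
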
888 #define pci_enable_msi(pdev) \
889 linux_pci_enable_msi(pdev)
890
891 static inline int
892 pci_enable_msi(struct pci_dev *pdev)
893 {
894 struct resource_list_entry *rle;
895 int error;
896 int avail;
897
898 avail = pci_msi_count(pdev->dev.bsddev);
899 if (avail < 1)
900 return -EINVAL;
901
902 avail = 1; /* this function only enables one MSI IRQ */
903 if ((error = -pci_alloc_msi(pdev->dev.bsddev, &avail)) != 0)
904 return error;
905
906 rle = linux_pci_get_rle(pdev, SYS_RES_IRQ, 1, false);
907 pdev->dev.irq_start = rle->start;
908 pdev->dev.irq_end = rle->start + avail;
909 pdev->irq = rle->start;
910 pdev->msi_enabled = true;
911 return (0);
912 }
913
914 static inline int
915 pci_channel_offline(struct pci_dev *pdev)
916 {
917
918 return (pci_read_config(pdev->dev.bsddev, PCIR_VENDOR, 2) == PCIV_INVALID);
919 }
920
921 static inline int pci_enable_sriov(struct pci_dev *dev, int nr_virtfn)
922 {
923 return -ENODEV;
924 }
925
926 static inline void pci_disable_sriov(struct pci_dev *dev)
927 {
928 }
929
930 static inline void *
931 pci_iomap(struct pci_dev *pdev, int mmio_bar, int mmio_size)
932 {
933 struct resource *res;
934
935 res = _lkpi_pci_iomap(pdev, mmio_bar, mmio_size);
936 if (res == NULL)
937 return (NULL);
938 /* This is a FreeBSD extension so we can use bus_*(). */
939 if (pdev->want_iomap_res)
940 return (res);
941 return ((void *)rman_get_bushandle(res));
942 }
943
944 static inline void
945 pci_iounmap(struct pci_dev *pdev, void *res)
946 {
947 struct pci_mmio_region *mmio, *p;
948
949 TAILQ_FOREACH_SAFE(mmio, &pdev->mmio, next, p) {
950 if (res != (void *)rman_get_bushandle(mmio->res))
951 continue;
952 bus_release_resource(pdev->dev.bsddev,
953 mmio->type, mmio->rid, mmio->res);
954 TAILQ_REMOVE(&pdev->mmio, mmio, next);
955 free(mmio, M_DEVBUF);
956 return;
957 }
958 }
959
960 static inline void
961 lkpi_pci_save_state(struct pci_dev *pdev)
962 {
963
964 pci_save_state(pdev->dev.bsddev);
965 }
966
967 static inline void
968 lkpi_pci_restore_state(struct pci_dev *pdev)
969 {
970
971 pci_restore_state(pdev->dev.bsddev);
972 }
973
974 #define pci_save_state(dev) lkpi_pci_save_state(dev)
975 #define pci_restore_state(dev) lkpi_pci_restore_state(dev)
976
977 static inline int
978 pci_reset_function(struct pci_dev *pdev)
979 {
980
981 return (-ENOSYS);
982 }
983
984 #define DEFINE_PCI_DEVICE_TABLE(_table) \
985 const struct pci_device_id _table[] __devinitdata
986
987 /* XXX This should not be necessary. */
988 #define pcix_set_mmrbc(d, v) 0
989 #define pcix_get_max_mmrbc(d) 0
990 #define pcie_set_readrq(d, v) pci_set_max_read_req((d)->dev.bsddev, (v))
991
992 #define PCI_DMA_BIDIRECTIONAL 0
993 #define PCI_DMA_TODEVICE 1
994 #define PCI_DMA_FROMDEVICE 2
995 #define PCI_DMA_NONE 3
996
997 #define pci_pool dma_pool
998 #define pci_pool_destroy(...) dma_pool_destroy(__VA_ARGS__)
999 #define pci_pool_alloc(...) dma_pool_alloc(__VA_ARGS__)
1000 #define pci_pool_free(...) dma_pool_free(__VA_ARGS__)
1001 #define pci_pool_create(_name, _pdev, _size, _align, _alloc) \
1002 dma_pool_create(_name, &(_pdev)->dev, _size, _align, _alloc)
1003 #define pci_free_consistent(_hwdev, _size, _vaddr, _dma_handle) \
1004 dma_free_coherent((_hwdev) == NULL ? NULL : &(_hwdev)->dev, \
1005 _size, _vaddr, _dma_handle)
1006 #define pci_map_sg(_hwdev, _sg, _nents, _dir) \
1007 dma_map_sg((_hwdev) == NULL ? NULL : &(_hwdev->dev), \
1008 _sg, _nents, (enum dma_data_direction)_dir)
1009 #define pci_map_single(_hwdev, _ptr, _size, _dir) \
1010 dma_map_single((_hwdev) == NULL ? NULL : &(_hwdev->dev), \
1011 (_ptr), (_size), (enum dma_data_direction)_dir)
1012 #define pci_unmap_single(_hwdev, _addr, _size, _dir) \
1013 dma_unmap_single((_hwdev) == NULL ? NULL : &(_hwdev)->dev, \
1014 _addr, _size, (enum dma_data_direction)_dir)
1015 #define pci_unmap_sg(_hwdev, _sg, _nents, _dir) \
1016 dma_unmap_sg((_hwdev) == NULL ? NULL : &(_hwdev)->dev, \
1017 _sg, _nents, (enum dma_data_direction)_dir)
1018 #define pci_map_page(_hwdev, _page, _offset, _size, _dir) \
1019 dma_map_page((_hwdev) == NULL ? NULL : &(_hwdev)->dev, _page,\
1020 _offset, _size, (enum dma_data_direction)_dir)
1021 #define pci_unmap_page(_hwdev, _dma_address, _size, _dir) \
1022 dma_unmap_page((_hwdev) == NULL ? NULL : &(_hwdev)->dev, \
1023 _dma_address, _size, (enum dma_data_direction)_dir)
1024 #define pci_set_dma_mask(_pdev, mask) dma_set_mask(&(_pdev)->dev, (mask))
1025 #define pci_dma_mapping_error(_pdev, _dma_addr) \
1026 dma_mapping_error(&(_pdev)->dev, _dma_addr)
1027 #define pci_set_consistent_dma_mask(_pdev, _mask) \
1028 dma_set_coherent_mask(&(_pdev)->dev, (_mask))
1029 #define DECLARE_PCI_UNMAP_ADDR(x) DEFINE_DMA_UNMAP_ADDR(x);
1030 #define DECLARE_PCI_UNMAP_LEN(x) DEFINE_DMA_UNMAP_LEN(x);
1031 #define pci_unmap_addr dma_unmap_addr
1032 #define pci_unmap_addr_set dma_unmap_addr_set
1033 #define pci_unmap_len dma_unmap_len
1034 #define pci_unmap_len_set dma_unmap_len_set
1035
1036 typedef unsigned int __bitwise pci_channel_state_t;
1037 typedef unsigned int __bitwise pci_ers_result_t;
1038
1039 enum pci_channel_state {
1040 pci_channel_io_normal = 1,
1041 pci_channel_io_frozen = 2,
1042 pci_channel_io_perm_failure = 3,
1043 };
1044
1045 enum pci_ers_result {
1046 PCI_ERS_RESULT_NONE = 1,
1047 PCI_ERS_RESULT_CAN_RECOVER = 2,
1048 PCI_ERS_RESULT_NEED_RESET = 3,
1049 PCI_ERS_RESULT_DISCONNECT = 4,
1050 PCI_ERS_RESULT_RECOVERED = 5,
1051 };
1052
1053 /* PCI bus error event callbacks */
1054 struct pci_error_handlers {
1055 pci_ers_result_t (*error_detected)(struct pci_dev *dev,
1056 enum pci_channel_state error);
1057 pci_ers_result_t (*mmio_enabled)(struct pci_dev *dev);
1058 pci_ers_result_t (*link_reset)(struct pci_dev *dev);
1059 pci_ers_result_t (*slot_reset)(struct pci_dev *dev);
1060 void (*resume)(struct pci_dev *dev);
1061 };
1062
1063 /* FreeBSD does not support SRIOV - yet */
1064 static inline struct pci_dev *pci_physfn(struct pci_dev *dev)
1065 {
1066 return dev;
1067 }
1068
1069 static inline bool pci_is_pcie(struct pci_dev *dev)
1070 {
1071 return !!pci_pcie_cap(dev);
1072 }
1073
1074 static inline u16 pcie_flags_reg(struct pci_dev *dev)
1075 {
1076 int pos;
1077 u16 reg16;
1078
1079 pos = pci_find_capability(dev, PCI_CAP_ID_EXP);
1080 if (!pos)
1081 return 0;
1082
1083 pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &reg16);
1084
1085 return reg16;
1086 }
1087
1088 static inline int pci_pcie_type(struct pci_dev *dev)
1089 {
1090 return (pcie_flags_reg(dev) & PCI_EXP_FLAGS_TYPE) >> 4;
1091 }
1092
1093 static inline int pcie_cap_version(struct pci_dev *dev)
1094 {
1095 return pcie_flags_reg(dev) & PCI_EXP_FLAGS_VERS;
1096 }
1097
1098 static inline bool pcie_cap_has_lnkctl(struct pci_dev *dev)
1099 {
1100 int type = pci_pcie_type(dev);
1101
1102 return pcie_cap_version(dev) > 1 ||
1103 type == PCI_EXP_TYPE_ROOT_PORT ||
1104 type == PCI_EXP_TYPE_ENDPOINT ||
1105 type == PCI_EXP_TYPE_LEG_END;
1106 }
1107
1108 static inline bool pcie_cap_has_devctl(const struct pci_dev *dev)
1109 {
1110 return true;
1111 }
1112
1113 static inline bool pcie_cap_has_sltctl(struct pci_dev *dev)
1114 {
1115 int type = pci_pcie_type(dev);
1116
1117 return pcie_cap_version(dev) > 1 || type == PCI_EXP_TYPE_ROOT_PORT ||
1118 (type == PCI_EXP_TYPE_DOWNSTREAM &&
1119 pcie_flags_reg(dev) & PCI_EXP_FLAGS_SLOT);
1120 }
1121
1122 static inline bool pcie_cap_has_rtctl(struct pci_dev *dev)
1123 {
1124 int type = pci_pcie_type(dev);
1125
1126 return pcie_cap_version(dev) > 1 || type == PCI_EXP_TYPE_ROOT_PORT ||
1127 type == PCI_EXP_TYPE_RC_EC;
1128 }
1129
1130 static bool pcie_capability_reg_implemented(struct pci_dev *dev, int pos)
1131 {
1132 if (!pci_is_pcie(dev))
1133 return false;
1134
1135 switch (pos) {
1136 case PCI_EXP_FLAGS_TYPE:
1137 return true;
1138 case PCI_EXP_DEVCAP:
1139 case PCI_EXP_DEVCTL:
1140 case PCI_EXP_DEVSTA:
1141 return pcie_cap_has_devctl(dev);
1142 case PCI_EXP_LNKCAP:
1143 case PCI_EXP_LNKCTL:
1144 case PCI_EXP_LNKSTA:
1145 return pcie_cap_has_lnkctl(dev);
1146 case PCI_EXP_SLTCAP:
1147 case PCI_EXP_SLTCTL:
1148 case PCI_EXP_SLTSTA:
1149 return pcie_cap_has_sltctl(dev);
1150 case PCI_EXP_RTCTL:
1151 case PCI_EXP_RTCAP:
1152 case PCI_EXP_RTSTA:
1153 return pcie_cap_has_rtctl(dev);
1154 case PCI_EXP_DEVCAP2:
1155 case PCI_EXP_DEVCTL2:
1156 case PCI_EXP_LNKCAP2:
1157 case PCI_EXP_LNKCTL2:
1158 case PCI_EXP_LNKSTA2:
1159 return pcie_cap_version(dev) > 1;
1160 default:
1161 return false;
1162 }
1163 }
1164
1165 static inline int
1166 pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *dst)
1167 {
1168 if (pos & 3)
1169 return -EINVAL;
1170
1171 if (!pcie_capability_reg_implemented(dev, pos))
1172 return -EINVAL;
1173
1174 return pci_read_config_dword(dev, pci_pcie_cap(dev) + pos, dst);
1175 }
1176
1177 static inline int
1178 pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *dst)
1179 {
1180 if (pos & 3)
1181 return -EINVAL;
1182
1183 if (!pcie_capability_reg_implemented(dev, pos))
1184 return -EINVAL;
1185
1186 return pci_read_config_word(dev, pci_pcie_cap(dev) + pos, dst);
1187 }
1188
1189 static inline int
1190 pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val)
1191 {
1192 if (pos & 1)
1193 return -EINVAL;
1194
1195 if (!pcie_capability_reg_implemented(dev, pos))
1196 return 0;
1197
1198 return pci_write_config_word(dev, pci_pcie_cap(dev) + pos, val);
1199 }
1200
1201 static inline int
1202 pcie_capability_set_word(struct pci_dev *dev, int pos, uint16_t val)
1203 {
1204 int error;
1205 uint16_t v;
1206
1207 error = pcie_capability_read_word(dev, pos, &v);
1208 if (error != 0)
1209 return (error);
1210
1211 v |= val;
1212
1213 error = pcie_capability_write_word(dev, pos, v);
1214 return (error);
1215 }
1216
1217 static inline int
1218 pcie_capability_clear_word(struct pci_dev *dev, int pos, uint16_t val)
1219 {
1220 int error;
1221 uint16_t v;
1222
1223 error = pcie_capability_read_word(dev, pos, &v);
1224 if (error != 0)
1225 return (error);
1226
1227 v &= ~val;
1228
1229 error = pcie_capability_write_word(dev, pos, v);
1230 return (error);
1231 }
1232
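/*
 * Example (sketch): clearing the ASPM control bits in the Link Control
 * register with the read-modify-write helpers above:
 *
 *	pcie_capability_clear_word(pdev, PCI_EXP_LNKCTL,
 *	    PCI_EXP_LNKCTL_ASPMC);
 */
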
1233 static inline int pcie_get_minimum_link(struct pci_dev *dev,
1234 enum pci_bus_speed *speed, enum pcie_link_width *width)
1235 {
1236 *speed = PCI_SPEED_UNKNOWN;
1237 *width = PCIE_LNK_WIDTH_UNKNOWN;
1238 return (0);
1239 }
1240
1241 static inline int
1242 pci_num_vf(struct pci_dev *dev)
1243 {
1244 return (0);
1245 }
1246
1247 static inline enum pci_bus_speed
1248 pcie_get_speed_cap(struct pci_dev *dev)
1249 {
1250 device_t root;
1251 uint32_t lnkcap, lnkcap2;
1252 int error, pos;
1253
1254 root = device_get_parent(dev->dev.bsddev);
1255 if (root == NULL)
1256 return (PCI_SPEED_UNKNOWN);
1257 root = device_get_parent(root);
1258 if (root == NULL)
1259 return (PCI_SPEED_UNKNOWN);
1260 root = device_get_parent(root);
1261 if (root == NULL)
1262 return (PCI_SPEED_UNKNOWN);
1263
1264 if (pci_get_vendor(root) == PCI_VENDOR_ID_VIA ||
1265 pci_get_vendor(root) == PCI_VENDOR_ID_SERVERWORKS)
1266 return (PCI_SPEED_UNKNOWN);
1267
1268 if ((error = pci_find_cap(root, PCIY_EXPRESS, &pos)) != 0)
1269 return (PCI_SPEED_UNKNOWN);
1270
1271 lnkcap2 = pci_read_config(root, pos + PCIER_LINK_CAP2, 4);
1272
1273 if (lnkcap2) { /* PCIe r3.0-compliant */
1274 if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_2_5GB)
1275 return (PCIE_SPEED_2_5GT);
1276 if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_5_0GB)
1277 return (PCIE_SPEED_5_0GT);
1278 if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_8_0GB)
1279 return (PCIE_SPEED_8_0GT);
1280 if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_16_0GB)
1281 return (PCIE_SPEED_16_0GT);
1282 if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_32_0GB)
1283 return (PCIE_SPEED_32_0GT);
1284 if (lnkcap2 & PCI_EXP_LNKCAP2_SLS_64_0GB)
1285 return (PCIE_SPEED_64_0GT);
1286 } else { /* pre-r3.0 */
1287 lnkcap = pci_read_config(root, pos + PCIER_LINK_CAP, 4);
1288 if (lnkcap & PCI_EXP_LNKCAP_SLS_2_5GB)
1289 return (PCIE_SPEED_2_5GT);
1290 if (lnkcap & PCI_EXP_LNKCAP_SLS_5_0GB)
1291 return (PCIE_SPEED_5_0GT);
1292 if (lnkcap & PCI_EXP_LNKCAP_SLS_8_0GB)
1293 return (PCIE_SPEED_8_0GT);
1294 if (lnkcap & PCI_EXP_LNKCAP_SLS_16_0GB)
1295 return (PCIE_SPEED_16_0GT);
1296 if (lnkcap & PCI_EXP_LNKCAP_SLS_32_0GB)
1297 return (PCIE_SPEED_32_0GT);
1298 if (lnkcap & PCI_EXP_LNKCAP_SLS_64_0GB)
1299 return (PCIE_SPEED_64_0GT);
1300 }
1301 return (PCI_SPEED_UNKNOWN);
1302 }
1303
1304 static inline enum pcie_link_width
1305 pcie_get_width_cap(struct pci_dev *dev)
1306 {
1307 uint32_t lnkcap;
1308
1309 pcie_capability_read_dword(dev, PCI_EXP_LNKCAP, &lnkcap);
1310 if (lnkcap)
1311 return ((lnkcap & PCI_EXP_LNKCAP_MLW) >> 4);
1312
1313 return (PCIE_LNK_WIDTH_UNKNOWN);
1314 }
1315
1316 static inline int
1317 pcie_get_mps(struct pci_dev *dev)
1318 {
1319 return (pci_get_max_payload(dev->dev.bsddev));
1320 }
1321
1322 static inline uint32_t
1323 PCIE_SPEED2MBS_ENC(enum pci_bus_speed spd)
1324 {
1325
1326 switch(spd) {
1327 case PCIE_SPEED_64_0GT:
1328 return (64000 * 128 / 130);
1329 case PCIE_SPEED_32_0GT:
1330 return (32000 * 128 / 130);
1331 case PCIE_SPEED_16_0GT:
1332 return (16000 * 128 / 130);
1333 case PCIE_SPEED_8_0GT:
1334 return (8000 * 128 / 130);
1335 case PCIE_SPEED_5_0GT:
1336 return (5000 * 8 / 10);
1337 case PCIE_SPEED_2_5GT:
1338 return (2500 * 8 / 10);
1339 default:
1340 return (0);
1341 }
1342 }
1343
1344 static inline uint32_t
1345 pcie_bandwidth_available(struct pci_dev *pdev,
1346 struct pci_dev **limiting,
1347 enum pci_bus_speed *speed,
1348 enum pcie_link_width *width)
1349 {
1350 enum pci_bus_speed nspeed = pcie_get_speed_cap(pdev);
1351 enum pcie_link_width nwidth = pcie_get_width_cap(pdev);
1352
1353 if (speed)
1354 *speed = nspeed;
1355 if (width)
1356 *width = nwidth;
1357
1358 return (nwidth * PCIE_SPEED2MBS_ENC(nspeed));
1359 }
1360
1361 static inline struct pci_dev *
1362 pcie_find_root_port(struct pci_dev *pdev)
1363 {
1364 device_t root;
1365
1366 if (pdev->root != NULL)
1367 return (pdev->root);
1368
1369 root = pci_find_pcie_root_port(pdev->dev.bsddev);
1370 if (root == NULL)
1371 return (NULL);
1372
1373 pdev->root = lkpinew_pci_dev(root);
1374 return (pdev->root);
1375 }
1376
1377 /* This is needed when people rip out the device "HotPlug". */
1378 static inline void
1379 pci_lock_rescan_remove(void)
1380 {
1381 }
1382
1383 static inline void
1384 pci_unlock_rescan_remove(void)
1385 {
1386 }
1387
1388 static __inline void
1389 pci_stop_and_remove_bus_device(struct pci_dev *pdev)
1390 {
1391 }
1392
1393 /*
1394 * The following functions can be used to attach/detach the LinuxKPI's
1395 * PCI device runtime. The pci_driver and pci_device_id pointers are
1396 * allowed to be NULL. All other pointers must be valid.
1397 * The pci_dev structure should be zero-initialized before being passed
1398 * to the linux_pci_attach_device function.
1399 */
1400 extern int linux_pci_attach_device(device_t, struct pci_driver *,
1401 const struct pci_device_id *, struct pci_dev *);
1402 extern int linux_pci_detach_device(struct pci_dev *);
1403
1404 static inline int
1405 pci_dev_present(const struct pci_device_id *cur)
1406 {
1407 while (cur != NULL && (cur->vendor || cur->device)) {
1408 if (pci_find_device(cur->vendor, cur->device) != NULL) {
1409 return (1);
1410 }
1411 cur++;
1412 }
1413 return (0);
1414 }
1415
1416 struct pci_dev *lkpi_pci_get_domain_bus_and_slot(int domain,
1417 unsigned int bus, unsigned int devfn);
1418 #define pci_get_domain_bus_and_slot(domain, bus, devfn) \
1419 lkpi_pci_get_domain_bus_and_slot(domain, bus, devfn)
1420
1421 static inline int
1422 pci_domain_nr(struct pci_bus *pbus)
1423 {
1424
1425 return (pbus->domain);
1426 }
1427
1428 static inline int
1429 pci_bus_read_config(struct pci_bus *bus, unsigned int devfn,
1430 int pos, uint32_t *val, int len)
1431 {
1432
1433 *val = pci_read_config(bus->self->dev.bsddev, pos, len);
1434 return (0);
1435 }
1436
1437 static inline int
1438 pci_bus_read_config_word(struct pci_bus *bus, unsigned int devfn, int pos, u16 *val)
1439 {
1440 uint32_t tmp;
1441 int ret;
1442
1443 ret = pci_bus_read_config(bus, devfn, pos, &tmp, 2);
1444 *val = (u16)tmp;
1445 return (ret);
1446 }
1447
1448 static inline int
1449 pci_bus_read_config_byte(struct pci_bus *bus, unsigned int devfn, int pos, u8 *val)
1450 {
1451 uint32_t tmp;
1452 int ret;
1453
1454 ret = pci_bus_read_config(bus, devfn, pos, &tmp, 1);
1455 *val = (u8)tmp;
1456 return (ret);
1457 }
1458
1459 static inline int
1460 pci_bus_write_config(struct pci_bus *bus, unsigned int devfn, int pos,
1461 uint32_t val, int size)
1462 {
1463
1464 pci_write_config(bus->self->dev.bsddev, pos, val, size);
1465 return (0);
1466 }
1467
1468 static inline int
1469 pci_bus_write_config_byte(struct pci_bus *bus, unsigned int devfn, int pos,
1470 uint8_t val)
1471 {
1472 return (pci_bus_write_config(bus, devfn, pos, val, 1));
1473 }
1474
1475 static inline int
1476 pci_bus_write_config_word(struct pci_bus *bus, unsigned int devfn, int pos,
1477 uint16_t val)
1478 {
1479 return (pci_bus_write_config(bus, devfn, pos, val, 2));
1480 }
1481
1482 struct pci_dev *lkpi_pci_get_class(unsigned int class, struct pci_dev *from);
1483 #define pci_get_class(class, from) lkpi_pci_get_class(class, from)
1484
1485 /* -------------------------------------------------------------------------- */
1486
1487 static inline int
1488 pcim_enable_device(struct pci_dev *pdev)
1489 {
1490 struct pci_devres *dr;
1491 int error;
1492
1493 /* Here we cannot run through the pdev->managed check. */
1494 dr = lkpi_pci_devres_get_alloc(pdev);
1495 if (dr == NULL)
1496 return (-ENOMEM);
1497
1498 /* If resources were enabled before, do not do it again. */
1499 if (dr->enable_io)
1500 return (0);
1501
1502 error = pci_enable_device(pdev);
1503 if (error == 0)
1504 dr->enable_io = true;
1505
1506 /* This device is now managed (devres). */
1507 pdev->managed = true;
1508
1509 return (error);
1510 }
1511
1512 static inline void __iomem **
1513 pcim_iomap_table(struct pci_dev *pdev)
1514 {
1515 struct pcim_iomap_devres *dr;
1516
1517 dr = lkpi_pcim_iomap_devres_find(pdev);
1518 if (dr == NULL)
1519 return (NULL);
1520
1521 /*
1522 * If the driver has manually set the flag so that it can request the
1523 * resource and use bus_read/write_<n>(), return the resource table.
1524 */
1525 if (pdev->want_iomap_res)
1526 return ((void **)dr->res_table);
1527
1528 /* This is the Linux default. */
1529 return (dr->mmio_table);
1530 }
1531
1532 static inline int
1533 pcim_iomap_regions(struct pci_dev *pdev, uint32_t mask, const char *name)
1534 {
1535 struct pcim_iomap_devres *dr;
1536 void *res;
1537 uint32_t mappings;
1538 int bar;
1539
1540 dr = lkpi_pcim_iomap_devres_find(pdev);
1541 if (dr == NULL)
1542 return (-ENOMEM);
1543
1544 /* Now iomap all the requested (by "mask") ones. */
1545 for (bar = mappings = 0; mappings != mask; bar++) {
1546 if ((mask & (1 << bar)) == 0)
1547 continue;
1548
1549 /* Requesting the same BAR twice is not allowed. */
1550 if (dr->mmio_table[bar] != NULL) {
1551 device_printf(pdev->dev.bsddev, "%s: bar %d %p\n",
1552 __func__, bar, dr->mmio_table[bar]);
1553 goto err;
1554 }
1555
1556 res = _lkpi_pci_iomap(pdev, bar, 0);
1557 if (res == NULL)
1558 goto err;
1559 dr->mmio_table[bar] = (void *)rman_get_bushandle(res);
1560 dr->res_table[bar] = res;
1561
1562 mappings |= (1 << bar);
1563 }
1564
1565 return (0);
1566 err:
1567 for (bar = PCIR_MAX_BAR_0; bar >= 0; bar--) {
1568 if ((mappings & (1 << bar)) != 0) {
1569 res = dr->mmio_table[bar];
1570 if (res == NULL)
1571 continue;
1572 pci_iounmap(pdev, res);
1573 }
1574 }
1575
1576 return (-EINVAL);
1577 }
1578
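/*
 * Example (sketch): a managed ("pcim_") setup as a hypothetical probe
 * routine might do it, enabling the device, mapping BAR 0 and looking up
 * its mapping:
 *
 *	void __iomem *base;
 *	int error;
 *
 *	error = pcim_enable_device(pdev);
 *	if (error != 0)
 *		return (error);
 *	error = pcim_iomap_regions(pdev, 1 << 0, "foo");
 *	if (error != 0)
 *		return (error);
 *	base = pcim_iomap_table(pdev)[0];
 */
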
1579 static inline int
1580 pcim_iomap_regions_request_all(struct pci_dev *pdev, uint32_t mask, char *name)
1581 {
1582 uint32_t requests, req_mask;
1583 int bar, error;
1584
1585 /* Request all the BARs ("regions") we do not iomap. */
1586 req_mask = ((1 << (PCIR_MAX_BAR_0 + 1)) - 1) & ~mask;
1587 for (bar = requests = 0; requests != req_mask; bar++) {
1588 if ((req_mask & (1 << bar)) == 0)
1589 continue;
1590 error = pci_request_region(pdev, bar, name);
1591 if (error != 0 && error != -ENODEV)
1592 goto err;
1593 requests |= (1 << bar);
1594 }
1595
1596 error = pcim_iomap_regions(pdev, mask, name);
1597 if (error != 0)
1598 goto err;
1599
1600 return (0);
1601
1602 err:
1603 for (bar = PCIR_MAX_BAR_0; bar >= 0; bar--) {
1604 if ((requests & (1 << bar)) != 0)
1605 pci_release_region(pdev, bar);
1606 }
1607
1608 return (-EINVAL);
1609 }
1610
1611 /*
1612 * We cannot simply re-define pci_get_device() as we would normally do
1613 * and then hide it in linux_pci.c as too many semi-native drivers still
1614 * include linux/pci.h and run into the conflict with native PCI. Linux drivers
1615 * using pci_get_device() need to be changed to call linuxkpi_pci_get_device().
1616 */
1617 static inline struct pci_dev *
1618 linuxkpi_pci_get_device(uint16_t vendor, uint16_t device, struct pci_dev *odev)
1619 {
1620
1621 return (lkpi_pci_get_device(vendor, device, odev));
1622 }
1623
1624 /* This is a FreeBSD extension so we can use bus_*(). */
1625 static inline void
1626 linuxkpi_pcim_want_to_use_bus_functions(struct pci_dev *pdev)
1627 {
1628 pdev->want_iomap_res = true;
1629 }
1630
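/*
 * Example (sketch): a driver opting into this extension gets the backing
 * struct resource from pci_iomap() and can use bus_read_4()/bus_write_4()
 * instead of the Linux readl()/writel() style accessors:
 *
 *	struct resource *res;
 *
 *	linuxkpi_pcim_want_to_use_bus_functions(pdev);
 *	res = pci_iomap(pdev, 0, 0);
 *	if (res != NULL)
 *		(void)bus_read_4(res, 0);
 */
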
1631 static inline bool
1632 pci_is_thunderbolt_attached(struct pci_dev *pdev)
1633 {
1634
1635 return (false);
1636 }
1637
1638 static inline void *
1639 pci_platform_rom(struct pci_dev *pdev, size_t *size)
1640 {
1641
1642 return (NULL);
1643 }
1644
1645 static inline void
1646 pci_ignore_hotplug(struct pci_dev *pdev)
1647 {
1648 }
1649
1650 static inline const char *
1651 pci_power_name(pci_power_t state)
1652 {
1653 int pstate = state + 1;
1654
1655 if (pstate >= 0 && pstate < nitems(pci_power_names))
1656 return (pci_power_names[pstate]);
1657 else
1658 return (pci_power_names[0]);
1659 }
1660
1661 static inline int
1662 pcie_get_readrq(struct pci_dev *dev)
1663 {
1664 u16 ctl;
1665
1666 if (pcie_capability_read_word(dev, PCI_EXP_DEVCTL, &ctl))
1667 return (-EINVAL);
1668
1669 return (128 << ((ctl & PCI_EXP_DEVCTL_READRQ) >> 12));
1670 }
1671
1672 static inline bool
1673 pci_is_enabled(struct pci_dev *pdev)
1674 {
1675
1676 return ((pci_read_config(pdev->dev.bsddev, PCIR_COMMAND, 2) &
1677 PCIM_CMD_BUSMASTEREN) != 0);
1678 }
1679
1680 static inline int
1681 pci_wait_for_pending_transaction(struct pci_dev *pdev)
1682 {
1683
1684 return (0);
1685 }
1686
1687 #endif /* _LINUXKPI_LINUX_PCI_H_ */