The Design and Implementation of the FreeBSD Operating System, Second Edition
Now available: The Design and Implementation of the FreeBSD Operating System (Second Edition)


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]

FreeBSD/Linux Kernel Cross Reference
sys/include/asm-mips64/pci.h

Version: -  FREEBSD  -  FREEBSD-13-STABLE  -  FREEBSD-13-0  -  FREEBSD-12-STABLE  -  FREEBSD-12-0  -  FREEBSD-11-STABLE  -  FREEBSD-11-0  -  FREEBSD-10-STABLE  -  FREEBSD-10-0  -  FREEBSD-9-STABLE  -  FREEBSD-9-0  -  FREEBSD-8-STABLE  -  FREEBSD-8-0  -  FREEBSD-7-STABLE  -  FREEBSD-7-0  -  FREEBSD-6-STABLE  -  FREEBSD-6-0  -  FREEBSD-5-STABLE  -  FREEBSD-5-0  -  FREEBSD-4-STABLE  -  FREEBSD-3-STABLE  -  FREEBSD22  -  l41  -  OPENBSD  -  linux-2.6  -  MK84  -  PLAN9  -  xnu-8792 
SearchContext: -  none  -  3  -  10 

    1 /*
    2  * This file is subject to the terms and conditions of the GNU General Public
    3  * License.  See the file "COPYING" in the main directory of this archive
    4  * for more details.
    5  */
#ifndef _ASM_PCI_H
#define _ASM_PCI_H

#include <linux/config.h>

#ifdef __KERNEL__

/* Can be used to override the logic in pci_scan_bus for skipping
   already-configured bus numbers - to be used for buggy BIOSes
   or architectures with incomplete PCI setup by the loader */

#ifdef CONFIG_PCI
extern unsigned int pcibios_assign_all_busses(void);
#else
/* Without PCI support the question never arises; always answer "no". */
#define pcibios_assign_all_busses()     0
#endif

/* Lowest I/O-port and memory addresses the PCI resource allocator may
   assign to device BARs (keeps allocations clear of legacy space). */
#define PCIBIOS_MIN_IO          0x1000
#define PCIBIOS_MIN_MEM         0x10000000

/* Enable bus mastering for @dev; implemented per platform elsewhere. */
extern void pcibios_set_master(struct pci_dev *dev);
   27 
/*
 * ISA IRQ penalty hook: intentionally a no-op on this platform because
 * PCI IRQs are not allocated dynamically, so there is nothing to bias.
 */
static inline void pcibios_penalize_isa_irq(int irq)
{
        /* We don't do dynamic PCI IRQ allocation */
}
   32 
/*
 * Dynamic DMA mapping stuff.
 * MIPS has everything mapped statically.
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <asm/scatterlist.h>
#include <linux/string.h>
#include <asm/io.h>

/* The DDB5074/DDB5476 evaluation boards place their PCI I/O and memory
 * windows elsewhere, so the generic minimums above are overridden. */
#if (defined(CONFIG_DDB5074) || defined(CONFIG_DDB5476))
#undef PCIBIOS_MIN_IO
#undef PCIBIOS_MIN_MEM
#define PCIBIOS_MIN_IO          0x0100000
#define PCIBIOS_MIN_MEM         0x1000000
#endif

struct pci_dev;

/*
 * The PCI address space does equal the physical memory address space.  The
 * networking and block device layers use this boolean for bounce buffer
 * decisions.  (1 == no IOMMU remapping between PCI and physical memory.)
 */
#define PCI_DMA_BUS_IS_PHYS     (1)
   59 
/*
 * Allocate and map kernel buffer using consistent mode DMA for a device.
 * hwdev should be valid struct pci_dev pointer for PCI devices,
 * NULL for PCI-like buses (ISA, EISA).
 * Returns non-NULL cpu-view pointer to the buffer if successful and
 * sets *dma_addrp to the pci side dma address as well, else *dma_addrp
 * is undefined.
 */
extern void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size,
                                  dma_addr_t *dma_handle);

/*
 * Free and unmap a consistent DMA buffer.
 * cpu_addr is what was returned from pci_alloc_consistent,
 * size must be the same as what was passed into pci_alloc_consistent,
 * and likewise dma_addr must be the same as what *dma_addrp was set to.
 *
 * References to the memory and mappings associated with cpu_addr/dma_addr
 * past this call are illegal.
 */
extern void pci_free_consistent(struct pci_dev *hwdev, size_t size,
                                void *vaddr, dma_addr_t dma_handle);
   82 
   83 /*
   84  * Map a single buffer of the indicated size for DMA in streaming mode.
   85  * The 32-bit bus address to use is returned.
   86  *
   87  * Once the device is given the dma address, the device owns this memory
   88  * until either pci_unmap_single or pci_dma_sync_single is performed.
   89  */
   90 static inline dma_addr_t pci_map_single(struct pci_dev *hwdev, void *ptr,
   91                                         size_t size, int direction)
   92 {
   93         unsigned long addr = (unsigned long) ptr;
   94 
   95         if (direction == PCI_DMA_NONE)
   96                 out_of_line_bug();
   97 
   98         dma_cache_wback_inv(addr, size);
   99 
  100         return bus_to_baddr(hwdev->bus, __pa(ptr));
  101 }
  102 
  103 /*
  104  * Unmap a single streaming mode DMA translation.  The dma_addr and size
  105  * must match what was provided for in a previous pci_map_single call.  All
  106  * other usages are undefined.
  107  *
  108  * After this call, reads by the cpu to the buffer are guarenteed to see
  109  * whatever the device wrote there.
  110  */
  111 static inline void pci_unmap_single(struct pci_dev *hwdev, dma_addr_t dma_addr,
  112                                     size_t size, int direction)
  113 {
  114         if (direction == PCI_DMA_NONE)
  115                 out_of_line_bug();
  116 
  117         if (direction != PCI_DMA_TODEVICE) {
  118                 unsigned long addr;
  119 
  120                 addr = baddr_to_bus(hwdev->bus, dma_addr) + PAGE_OFFSET;
  121                 dma_cache_wback_inv(addr, size);
  122         }
  123 }
  124 
  125 /*
  126  * pci_{map,unmap}_single_page maps a kernel page to a dma_addr_t. identical
  127  * to pci_map_single, but takes a struct page instead of a virtual address
  128  */
  129 static inline dma_addr_t pci_map_page(struct pci_dev *hwdev, struct page *page,
  130                                       unsigned long offset, size_t size,
  131                                       int direction)
  132 {
  133         unsigned long addr;
  134 
  135         if (direction == PCI_DMA_NONE)
  136                 out_of_line_bug();
  137 
  138         addr = (unsigned long) page_address(page) + offset;
  139         dma_cache_wback_inv(addr, size);
  140 
  141         return bus_to_baddr(hwdev->bus, page_to_phys(page) + offset);
  142 }
  143 
  144 static inline void pci_unmap_page(struct pci_dev *hwdev, dma_addr_t dma_address,
  145                                   size_t size, int direction)
  146 {
  147         if (direction == PCI_DMA_NONE)
  148                 out_of_line_bug();
  149 
  150         if (direction != PCI_DMA_TODEVICE) {
  151                 unsigned long addr;
  152 
  153                 addr = baddr_to_bus(hwdev->bus, dma_address) + PAGE_OFFSET;
  154                 dma_cache_wback_inv(addr, size);
  155         }
  156 }
  157 
/* pci_unmap_{page,single} is a nop so... */
/* No per-mapping state needs to be saved for unmap on this platform,
 * so the driver-side bookkeeping macros compile away to nothing.
 * NOTE(review): pci_unmap_* above do perform cache maintenance; the
 * "nop" comment refers only to address/length bookkeeping — confirm. */
#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME)
#define DECLARE_PCI_UNMAP_LEN(LEN_NAME)
#define pci_unmap_addr(PTR, ADDR_NAME)          (0)
#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0)
#define pci_unmap_len(PTR, LEN_NAME)            (0)
#define pci_unmap_len_set(PTR, LEN_NAME, VAL)   do { } while (0)
  165 
/*
 * Map a set of buffers described by scatterlist in streaming
 * mode for DMA.  This is the scatter-gather version of the
 * above pci_map_single interface.  Here the scatter gather list
 * elements are each tagged with the appropriate dma address
 * and length.  They are obtained via sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of
 *       DMA address/length pairs than there are SG table elements.
 *       (for example via virtual mapping capabilities)
 *       The routine returns the number of addr/length pairs actually
 *       used, at most nents.
 *
 * Device ownership issues as mentioned above for pci_map_single are
 * the same here.
 */
static inline int pci_map_sg(struct pci_dev *hwdev, struct scatterlist *sg,
                             int nents, int direction)
{
        int i;

        if (direction == PCI_DMA_NONE)
                out_of_line_bug();

        for (i = 0; i < nents; i++, sg++) {
                /* Each entry must carry exactly one of address/page. */
                if (sg->address && sg->page)
                        out_of_line_bug();
                else if (!sg->address && !sg->page)
                        out_of_line_bug();

                if (sg->address) {
                        /* Virtually-addressed entry: flush, then convert
                         * physical to bus address. */
                        dma_cache_wback_inv((unsigned long)sg->address,
                                            sg->length);
                        sg->dma_address = bus_to_baddr(hwdev->bus, __pa(sg->address));
                } else {
                        /* Page-based entry.
                         * NOTE(review): uses page_to_bus() without the
                         * bus_to_baddr() translation applied in the other
                         * branch — confirm this asymmetry is intended. */
                        sg->dma_address = page_to_bus(sg->page) +
                                          sg->offset;
                        dma_cache_wback_inv((unsigned long)
                                (page_address(sg->page) + sg->offset),
                                sg->length);
                }
        }

        /* This implementation never coalesces entries. */
        return nents;
}
  211 
/*
 * Unmap a set of streaming mode DMA translations.
 * Again, cpu read rules concerning calls here are the same as for
 * pci_unmap_single() above.
 */
static inline void pci_unmap_sg(struct pci_dev *hwdev, struct scatterlist *sg,
                                int nents, int direction)
{
        int i;

        if (direction == PCI_DMA_NONE)
                out_of_line_bug();

        /* Device-bound transfers need no CPU-side cache maintenance. */
        if (direction == PCI_DMA_TODEVICE)
                return;

        for (i = 0; i < nents; i++, sg++) {
                /* Same address/page exclusivity invariant as pci_map_sg. */
                if (sg->address && sg->page)
                        out_of_line_bug();
                else if (!sg->address && !sg->page)
                        out_of_line_bug();

                /* NOTE(review): page-based entries (sg->address == NULL)
                 * are skipped here although pci_map_sg() flushed them —
                 * confirm they need no maintenance on unmap. */
                if (!sg->address)
                        continue;
                dma_cache_wback_inv((unsigned long)sg->address, sg->length);
        }
}
  239 
  240 /*
  241  * Make physical memory consistent for a single
  242  * streaming mode DMA translation after a transfer.
  243  *
  244  * If you perform a pci_map_single() but wish to interrogate the
  245  * buffer using the cpu, yet do not wish to teardown the PCI dma
  246  * mapping, you must call this function before doing so.  At the
  247  * next point you give the PCI dma address back to the card, the
  248  * device again owns the buffer.
  249  */
  250 static inline void pci_dma_sync_single(struct pci_dev *hwdev,
  251                                        dma_addr_t dma_handle,
  252                                        size_t size, int direction)
  253 {
  254         unsigned long addr;
  255 
  256         if (direction == PCI_DMA_NONE)
  257                 out_of_line_bug();
  258 
  259         addr = baddr_to_bus(hwdev->bus, dma_handle) + PAGE_OFFSET;
  260         dma_cache_wback_inv(addr, size);
  261 }
  262 
/*
 * Make physical memory consistent for a set of streaming
 * mode DMA translations after a transfer.
 *
 * The same as pci_dma_sync_single but for a scatter-gather list,
 * same rules and usage.
 */
static inline void pci_dma_sync_sg(struct pci_dev *hwdev,
                                   struct scatterlist *sg,
                                   int nelems, int direction)
{
#ifdef CONFIG_NONCOHERENT_IO
        int i;
#endif

        if (direction == PCI_DMA_NONE)
                out_of_line_bug();

        /* Make sure that gcc doesn't leave the empty loop body.  */
#ifdef CONFIG_NONCOHERENT_IO
        /* NOTE(review): only sg->address is flushed here; page-based
         * entries are not handled — compare with pci_map_sg() above. */
        for (i = 0; i < nelems; i++, sg++)
                dma_cache_wback_inv((unsigned long)sg->address, sg->length);
#endif
}
  287 
  288 /*
  289  * Return whether the given PCI device DMA address mask can
  290  * be supported properly.  For example, if your device can
  291  * only drive the low 24-bits during PCI bus mastering, then
  292  * you would pass 0x00ffffff as the mask to this function.
  293  */
  294 static inline int pci_dma_supported(struct pci_dev *hwdev, u64 mask)
  295 {
  296         /*
  297          * we fall back to GFP_DMA when the mask isn't all 1s,
  298          * so we can't guarantee allocations that must be
  299          * within a tighter range than GFP_DMA..
  300          */
  301 #ifdef CONFIG_ISA
  302         if (mask < 0x00ffffff)
  303                 return 0;
  304 #endif
  305 
  306         return 1;
  307 }
  308 
/* This is always fine. */
/* 64-bit (DAC) addressing is unconditionally supported here. */
#define pci_dac_dma_supported(pci_dev, mask)    (1)
  311 
  312 static inline dma64_addr_t pci_dac_page_to_dma(struct pci_dev *pdev,
  313         struct page *page, unsigned long offset, int direction)
  314 {
  315         dma64_addr_t addr = page_to_phys(page) + offset;
  316 
  317         return (dma64_addr_t) bus_to_baddr(pdev->bus, addr);
  318 }
  319 
  320 static inline struct page *pci_dac_dma_to_page(struct pci_dev *pdev,
  321         dma64_addr_t dma_addr)
  322 {
  323         unsigned long poff = baddr_to_bus(pdev->bus, dma_addr) >> PAGE_SHIFT;
  324 
  325         return mem_map + poff;
  326 }
  327 
/*
 * Return the byte offset within its page of a DAC bus address
 * (~PAGE_MASK selects the low, in-page bits).
 */
static inline unsigned long pci_dac_dma_to_offset(struct pci_dev *pdev,
        dma64_addr_t dma_addr)
{
        return dma_addr & ~PAGE_MASK;
}
  333 
  334 static inline void pci_dac_dma_sync_single(struct pci_dev *pdev,
  335         dma64_addr_t dma_addr, size_t len, int direction)
  336 {
  337         unsigned long addr;
  338 
  339         if (direction == PCI_DMA_NONE)
  340                 BUG();
  341 
  342         addr = baddr_to_bus(pdev->bus, dma_addr) + PAGE_OFFSET;
  343         dma_cache_wback_inv(addr, len);
  344 }
  345 
/*
 * Return the index of the PCI controller for device.
 */
/* Single-controller view: every device reports controller 0;
 * (void)(pdev) suppresses the unused-argument warning. */
#define pci_controller_num(pdev)        ({ (void)(pdev); 0; })

/*
 * These macros should be used after a pci_map_sg call has been done
 * to get bus addresses of each of the SG entries and their lengths.
 * You should only work with the number of sg entries pci_map_sg
 * returns, or alternatively stop on the first sg_dma_len(sg) which
 * is 0.
 */
#define sg_dma_address(sg)      ((sg)->dma_address)
#define sg_dma_len(sg)          ((sg)->length)
  360 
  361 #endif /* __KERNEL__ */
  362 
  363 #endif /* _ASM_PCI_H */

Cache object: e0f30263e3e33329edfc61b13f696f4e


[ source navigation ] [ diff markup ] [ identifier search ] [ freetext search ] [ file search ] [ list types ] [ track identifier ]


This page is part of the FreeBSD/Linux Linux Kernel Cross-Reference, and was automatically generated using a modified version of the LXR engine.