FreeBSD/Linux Kernel Cross Reference
sys/arm64/arm64/gicv3_its.c


    1 /*-
    2  * Copyright (c) 2015-2016 The FreeBSD Foundation
    3  * All rights reserved.
    4  *
    5  * This software was developed by Andrew Turner under
    6  * the sponsorship of the FreeBSD Foundation.
    7  *
    8  * This software was developed by Semihalf under
    9  * the sponsorship of the FreeBSD Foundation.
   10  *
   11  * Redistribution and use in source and binary forms, with or without
   12  * modification, are permitted provided that the following conditions
   13  * are met:
   14  * 1. Redistributions of source code must retain the above copyright
   15  *    notice, this list of conditions and the following disclaimer.
   16  * 2. Redistributions in binary form must reproduce the above copyright
   17  *    notice, this list of conditions and the following disclaimer in the
   18  *    documentation and/or other materials provided with the distribution.
   19  *
   20  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   21  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   22  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   23  * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   24  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   25  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   26  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   27  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   28  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   29  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   30  * SUCH DAMAGE.
   31  */
   32 
   33 #include "opt_platform.h"
   34 
   35 #include <sys/cdefs.h>
   36 __FBSDID("$FreeBSD: releng/11.2/sys/arm64/arm64/gicv3_its.c 305529 2016-09-07 12:10:30Z andrew $");
   37 
   38 #include <sys/param.h>
   39 #include <sys/systm.h>
   40 #include <sys/bus.h>
   41 #include <sys/cpuset.h>
   42 #include <sys/endian.h>
   43 #include <sys/kernel.h>
   44 #include <sys/malloc.h>
   45 #include <sys/module.h>
   46 #include <sys/proc.h>
   47 #include <sys/queue.h>
   48 #include <sys/rman.h>
   49 #include <sys/smp.h>
   50 #include <sys/vmem.h>
   51 
   52 #include <vm/vm.h>
   53 #include <vm/pmap.h>
   54 
   55 #include <machine/bus.h>
   56 #include <machine/intr.h>
   57 
   58 #include <arm64/arm64/gic_v3_reg.h>
   59 #include <arm64/arm64/gic_v3_var.h>
   60 
   61 #ifdef FDT
   62 #include <dev/ofw/openfirm.h>
   63 #include <dev/ofw/ofw_bus.h>
   64 #include <dev/ofw/ofw_bus_subr.h>
   65 #endif
   66 #include <dev/pci/pcireg.h>
   67 #include <dev/pci/pcivar.h>
   68 
   69 #include "pcib_if.h"
   70 #include "pic_if.h"
   71 #include "msi_if.h"
   72 
   73 MALLOC_DEFINE(M_GICV3_ITS, "GICv3 ITS",
   74     "ARM GICv3 Interrupt Translation Service");
   75 
   76 #define LPI_NIRQS               (64 * 1024)
   77 
   78 /* The size and alignment of the command circular buffer */
   79 #define ITS_CMDQ_SIZE           (64 * 1024)     /* Must be a multiple of 4K */
   80 #define ITS_CMDQ_ALIGN          (64 * 1024)
   81 
   82 #define LPI_CONFTAB_SIZE        LPI_NIRQS
   83 #define LPI_CONFTAB_ALIGN       (64 * 1024)
   84 #define LPI_CONFTAB_MAX_ADDR    ((1ul << 48) - 1) /* We need a 47 bit PA */
   85 
   86 /* 1 bit per SPI, PPI, and SGI (8k), and 1 bit per LPI (LPI_CONFTAB_SIZE) */
   87 #define LPI_PENDTAB_SIZE        ((LPI_NIRQS + GIC_FIRST_LPI) / 8)
   88 #define LPI_PENDTAB_ALIGN       (64 * 1024)
   89 #define LPI_PENDTAB_MAX_ADDR    ((1ul << 48) - 1) /* We need a 47 bit PA */
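       /*
        * Illustrative arithmetic (added, assuming GIC_FIRST_LPI is 8192 as
        * defined in gic_v3_reg.h): each per-CPU pending table works out to
        * (65536 + 8192) / 8 = 9216 bytes, i.e. one pending bit for every
        * INTID below the LPI range plus one bit per possible LPI.
        */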
   90 
   91 #define LPI_INT_TRANS_TAB_ALIGN 256
   92 #define LPI_INT_TRANS_TAB_MAX_ADDR ((1ul << 48) - 1)
   93 
   94 /* ITS commands encoding */
   95 #define ITS_CMD_MOVI            (0x01)
   96 #define ITS_CMD_SYNC            (0x05)
   97 #define ITS_CMD_MAPD            (0x08)
   98 #define ITS_CMD_MAPC            (0x09)
   99 #define ITS_CMD_MAPTI           (0x0a)
  100 #define ITS_CMD_MAPI            (0x0b)
  101 #define ITS_CMD_INV             (0x0c)
  102 #define ITS_CMD_INVALL          (0x0d)
  103 /* Command */
  104 #define CMD_COMMAND_MASK        (0xFFUL)
  105 /* PCI device ID */
  106 #define CMD_DEVID_SHIFT         (32)
  107 #define CMD_DEVID_MASK          (0xFFFFFFFFUL << CMD_DEVID_SHIFT)
  108 /* Size of IRQ ID bitfield */
  109 #define CMD_SIZE_MASK           (0xFFUL)
  110 /* Virtual LPI ID */
  111 #define CMD_ID_MASK             (0xFFFFFFFFUL)
  112 /* Physical LPI ID */
  113 #define CMD_PID_SHIFT           (32)
  114 #define CMD_PID_MASK            (0xFFFFFFFFUL << CMD_PID_SHIFT)
  115 /* Collection */
  116 #define CMD_COL_MASK            (0xFFFFUL)
  117 /* Target (CPU or Re-Distributor) */
  118 #define CMD_TARGET_SHIFT        (16)
  119 #define CMD_TARGET_MASK         (0xFFFFFFFFUL << CMD_TARGET_SHIFT)
  120 /* Interrupt Translation Table address */
  121 #define CMD_ITT_MASK            (0xFFFFFFFFFF00UL)
  122 /* Valid command bit */
  123 #define CMD_VALID_SHIFT         (63)
  124 #define CMD_VALID_MASK          (1UL << CMD_VALID_SHIFT)
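       /*
        * Layout sketch (added for clarity, following the masks above): each
        * command occupies four 64-bit doublewords.  For MAPTI, DW0 carries
        * the command code in bits [7:0] and the DeviceID in bits [63:32],
        * DW1 carries the EventID in bits [31:0] and the physical LPI number
        * in bits [63:32], and DW2 carries the target collection ID in its
        * low bits.
        */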
  125 
  126 #define ITS_TARGET_NONE         0xFBADBEEF
  127 
  128 /* LPI chunk owned by ITS device */
  129 struct lpi_chunk {
  130         u_int   lpi_base;
  131         u_int   lpi_free;       /* First free LPI in set */
  132         u_int   lpi_num;        /* Total number of LPIs in chunk */
   133         u_int   lpi_busy;       /* Number of busy LPIs in chunk */
  134 };
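       /*
        * Bookkeeping sketch (added for clarity): lpi_base is the start of a
        * contiguous range taken from the driver's vmem arena, the next
        * unallocated LPI is lpi_base + lpi_num - lpi_free, and lpi_busy
        * counts the vectors currently handed to the device; the its_dev is
        * released once lpi_busy drops back to zero.
        */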
  135 
  136 /* ITS device */
  137 struct its_dev {
  138         TAILQ_ENTRY(its_dev)    entry;
  139         /* PCI device */
  140         device_t                pci_dev;
  141         /* Device ID (i.e. PCI device ID) */
  142         uint32_t                devid;
  143         /* List of assigned LPIs */
  144         struct lpi_chunk        lpis;
  145         /* Virtual address of ITT */
  146         vm_offset_t             itt;
  147         size_t                  itt_size;
  148 };
  149 
  150 /*
  151  * ITS command descriptor.
  152  * Idea for command description passing taken from Linux.
  153  */
  154 struct its_cmd_desc {
  155         uint8_t cmd_type;
  156 
  157         union {
  158                 struct {
  159                         struct its_dev *its_dev;
  160                         struct its_col *col;
  161                         uint32_t id;
  162                 } cmd_desc_movi;
  163 
  164                 struct {
  165                         struct its_col *col;
  166                 } cmd_desc_sync;
  167 
  168                 struct {
  169                         struct its_col *col;
  170                         uint8_t valid;
  171                 } cmd_desc_mapc;
  172 
  173                 struct {
  174                         struct its_dev *its_dev;
  175                         struct its_col *col;
  176                         uint32_t pid;
  177                         uint32_t id;
  178                 } cmd_desc_mapvi;
  179 
  180                 struct {
  181                         struct its_dev *its_dev;
  182                         struct its_col *col;
  183                         uint32_t pid;
  184                 } cmd_desc_mapi;
  185 
  186                 struct {
  187                         struct its_dev *its_dev;
  188                         uint8_t valid;
  189                 } cmd_desc_mapd;
  190 
  191                 struct {
  192                         struct its_dev *its_dev;
  193                         struct its_col *col;
  194                         uint32_t pid;
  195                 } cmd_desc_inv;
  196 
  197                 struct {
  198                         struct its_col *col;
  199                 } cmd_desc_invall;
  200         };
  201 };
  202 
  203 /* ITS command. Each command is 32 bytes long */
  204 struct its_cmd {
  205         uint64_t        cmd_dword[4];   /* ITS command double word */
  206 };
  207 
  208 /* An ITS private table */
  209 struct its_ptable {
  210         vm_offset_t     ptab_vaddr;
  211         unsigned long   ptab_size;
  212 };
  213 
  214 /* ITS collection description. */
  215 struct its_col {
  216         uint64_t        col_target;     /* Target Re-Distributor */
  217         uint64_t        col_id;         /* Collection ID */
  218 };
  219 
  220 struct gicv3_its_irqsrc {
  221         struct intr_irqsrc      gi_isrc;
  222         u_int                   gi_irq;
  223         struct its_dev          *gi_its_dev;
  224 };
  225 
  226 struct gicv3_its_softc {
  227         struct intr_pic *sc_pic;
  228         struct resource *sc_its_res;
  229 
  230         struct its_ptable sc_its_ptab[GITS_BASER_NUM];
  231         struct its_col *sc_its_cols[MAXCPU];    /* Per-CPU collections */
  232 
  233         /*
  234          * TODO: We should get these from the parent as we only want a
  235          * single copy of each across the interrupt controller.
  236          */
  237         vm_offset_t sc_conf_base;
  238         vm_offset_t sc_pend_base[MAXCPU];
  239 
  240         /* Command handling */
  241         struct mtx sc_its_cmd_lock;
  242         struct its_cmd *sc_its_cmd_base; /* Command circular buffer address */
  243         size_t sc_its_cmd_next_idx;
  244 
  245         vmem_t *sc_irq_alloc;
  246         struct gicv3_its_irqsrc *sc_irqs;
  247 
  248         struct mtx sc_its_dev_lock;
  249         TAILQ_HEAD(its_dev_list, its_dev) sc_its_dev_list;
  250 
  251 #define ITS_FLAGS_CMDQ_FLUSH            0x00000001
  252 #define ITS_FLAGS_LPI_CONF_FLUSH        0x00000002
  253 #define ITS_FLAGS_ERRATA_CAVIUM_22375   0x00000004
  254         u_int sc_its_flags;
  255 };
  256 
  257 typedef void (its_quirk_func_t)(device_t);
  258 static its_quirk_func_t its_quirk_cavium_22375;
  259 
  260 static const struct {
  261         const char *desc;
  262         uint32_t iidr;
  263         uint32_t iidr_mask;
  264         its_quirk_func_t *func;
  265 } its_quirks[] = {
  266         {
  267                 /* Cavium ThunderX Pass 1.x */
   268                 .desc = "Cavium ThunderX errata: 22375, 24313",
  269                 .iidr = GITS_IIDR_RAW(GITS_IIDR_IMPL_CAVIUM,
  270                     GITS_IIDR_PROD_THUNDER, GITS_IIDR_VAR_THUNDER_1, 0),
  271                 .iidr_mask = ~GITS_IIDR_REVISION_MASK,
  272                 .func = its_quirk_cavium_22375,
  273         },
  274 };
  275 
  276 static u_int gic_irq_cpu;
  277 
  278 #define gic_its_read_4(sc, reg)                 \
  279     bus_read_4((sc)->sc_its_res, (reg))
  280 #define gic_its_read_8(sc, reg)                 \
  281     bus_read_8((sc)->sc_its_res, (reg))
  282 
  283 #define gic_its_write_4(sc, reg, val)           \
  284     bus_write_4((sc)->sc_its_res, (reg), (val))
  285 #define gic_its_write_8(sc, reg, val)           \
  286     bus_write_8((sc)->sc_its_res, (reg), (val))
  287 
  288 static device_attach_t gicv3_its_attach;
  289 static device_detach_t gicv3_its_detach;
  290 
  291 static pic_disable_intr_t gicv3_its_disable_intr;
  292 static pic_enable_intr_t gicv3_its_enable_intr;
  293 static pic_map_intr_t gicv3_its_map_intr;
  294 static pic_setup_intr_t gicv3_its_setup_intr;
  295 static pic_post_filter_t gicv3_its_post_filter;
  296 static pic_post_ithread_t gicv3_its_post_ithread;
  297 static pic_pre_ithread_t gicv3_its_pre_ithread;
  298 static pic_bind_intr_t gicv3_its_bind_intr;
  299 #ifdef SMP
  300 static pic_init_secondary_t gicv3_its_init_secondary;
  301 #endif
  302 static msi_alloc_msi_t gicv3_its_alloc_msi;
  303 static msi_release_msi_t gicv3_its_release_msi;
  304 static msi_alloc_msix_t gicv3_its_alloc_msix;
  305 static msi_release_msix_t gicv3_its_release_msix;
  306 static msi_map_msi_t gicv3_its_map_msi;
  307 
  308 static void its_cmd_movi(device_t, struct gicv3_its_irqsrc *);
  309 static void its_cmd_mapc(device_t, struct its_col *, uint8_t);
  310 static void its_cmd_mapti(device_t, struct gicv3_its_irqsrc *);
  311 static void its_cmd_mapd(device_t, struct its_dev *, uint8_t);
  312 static void its_cmd_inv(device_t, struct its_dev *, struct gicv3_its_irqsrc *);
  313 static void its_cmd_invall(device_t, struct its_col *);
  314 
  315 static device_method_t gicv3_its_methods[] = {
  316         /* Device interface */
  317         DEVMETHOD(device_detach,        gicv3_its_detach),
  318 
  319         /* Interrupt controller interface */
  320         DEVMETHOD(pic_disable_intr,     gicv3_its_disable_intr),
  321         DEVMETHOD(pic_enable_intr,      gicv3_its_enable_intr),
  322         DEVMETHOD(pic_map_intr,         gicv3_its_map_intr),
  323         DEVMETHOD(pic_setup_intr,       gicv3_its_setup_intr),
  324         DEVMETHOD(pic_post_filter,      gicv3_its_post_filter),
  325         DEVMETHOD(pic_post_ithread,     gicv3_its_post_ithread),
  326         DEVMETHOD(pic_pre_ithread,      gicv3_its_pre_ithread),
  327 #ifdef SMP
  328         DEVMETHOD(pic_bind_intr,        gicv3_its_bind_intr),
  329         DEVMETHOD(pic_init_secondary,   gicv3_its_init_secondary),
  330 #endif
  331 
  332         /* MSI/MSI-X */
  333         DEVMETHOD(msi_alloc_msi,        gicv3_its_alloc_msi),
  334         DEVMETHOD(msi_release_msi,      gicv3_its_release_msi),
  335         DEVMETHOD(msi_alloc_msix,       gicv3_its_alloc_msix),
  336         DEVMETHOD(msi_release_msix,     gicv3_its_release_msix),
  337         DEVMETHOD(msi_map_msi,          gicv3_its_map_msi),
  338 
  339         /* End */
  340         DEVMETHOD_END
  341 };
  342 
  343 static DEFINE_CLASS_0(gic, gicv3_its_driver, gicv3_its_methods,
  344     sizeof(struct gicv3_its_softc));
  345 
  346 static void
  347 gicv3_its_cmdq_init(struct gicv3_its_softc *sc)
  348 {
  349         vm_paddr_t cmd_paddr;
  350         uint64_t reg, tmp;
  351 
  352         /* Set up the command circular buffer */
  353         sc->sc_its_cmd_base = contigmalloc(ITS_CMDQ_SIZE, M_GICV3_ITS,
  354             M_WAITOK | M_ZERO, 0, (1ul << 48) - 1, ITS_CMDQ_ALIGN, 0);
  355         sc->sc_its_cmd_next_idx = 0;
  356 
  357         cmd_paddr = vtophys(sc->sc_its_cmd_base);
  358 
  359         /* Set the base of the command buffer */
  360         reg = GITS_CBASER_VALID |
  361             (GITS_CBASER_CACHE_NIWAWB << GITS_CBASER_CACHE_SHIFT) |
  362             cmd_paddr | (GITS_CBASER_SHARE_IS << GITS_CBASER_SHARE_SHIFT) |
  363             (ITS_CMDQ_SIZE / 4096 - 1);
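               /*
                * Added note: the low bits of GITS_CBASER encode the queue
                * size as the number of 4KB pages minus one, so a 64KB
                * queue is written as 64KB / 4096 - 1 = 15.
                */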
  364         gic_its_write_8(sc, GITS_CBASER, reg);
  365 
  366         /* Read back to check for fixed value fields */
  367         tmp = gic_its_read_8(sc, GITS_CBASER);
  368 
  369         if ((tmp & GITS_CBASER_SHARE_MASK) !=
  370             (GITS_CBASER_SHARE_IS << GITS_CBASER_SHARE_SHIFT)) {
  371                 /* Check if the hardware reported non-shareable */
  372                 if ((tmp & GITS_CBASER_SHARE_MASK) ==
  373                     (GITS_CBASER_SHARE_NS << GITS_CBASER_SHARE_SHIFT)) {
  374                         /* If so remove the cache attribute */
  375                         reg &= ~GITS_CBASER_CACHE_MASK;
  376                         reg &= ~GITS_CBASER_SHARE_MASK;
  377                         /* Set to Non-cacheable, Non-shareable */
  378                         reg |= GITS_CBASER_CACHE_NIN << GITS_CBASER_CACHE_SHIFT;
  379                         reg |= GITS_CBASER_SHARE_NS << GITS_CBASER_SHARE_SHIFT;
  380 
  381                         gic_its_write_8(sc, GITS_CBASER, reg);
  382                 }
  383 
  384                 /* The command queue has to be flushed after each command */
  385                 sc->sc_its_flags |= ITS_FLAGS_CMDQ_FLUSH;
  386         }
  387 
  388         /* Get the next command from the start of the buffer */
  389         gic_its_write_8(sc, GITS_CWRITER, 0x0);
  390 }
  391 
  392 static int
  393 gicv3_its_table_init(device_t dev, struct gicv3_its_softc *sc)
  394 {
  395         vm_offset_t table;
  396         vm_paddr_t paddr;
  397         uint64_t cache, reg, share, tmp, type;
  398         size_t esize, its_tbl_size, nidents, nitspages, npages;
  399         int i, page_size;
  400         int devbits;
  401 
  402         if ((sc->sc_its_flags & ITS_FLAGS_ERRATA_CAVIUM_22375) != 0) {
  403                 /*
  404                  * GITS_TYPER[17:13] of ThunderX reports that device IDs
  405                  * are to be 21 bits in length. The entry size of the ITS
  406                  * table can be read from GITS_BASERn[52:48] and on ThunderX
  407                  * is supposed to be 8 bytes in length (for device table).
  408                  * Finally the page size that is to be used by ITS to access
  409                  * this table will be set to 64KB.
  410                  *
  411                  * This gives 0x200000 entries of size 0x8 bytes covered by
  412                  * 256 pages each of which 64KB in size. The number of pages
  413                  * (minus 1) should then be written to GITS_BASERn[7:0]. In
  414                  * that case this value would be 0xFF but on ThunderX the
  415                  * maximum value that HW accepts is 0xFD.
  416                  *
  417                  * Set an arbitrary number of device ID bits to 20 in order
  418                  * to limit the number of entries in ITS device table to
  419                  * 0x100000 and the table size to 8MB.
  420                  */
  421                 devbits = 20;
  422                 cache = 0;
  423         } else {
  424                 devbits = GITS_TYPER_DEVB(gic_its_read_8(sc, GITS_TYPER));
  425                 cache = GITS_BASER_CACHE_WAWB;
  426         }
  427         share = GITS_BASER_SHARE_IS;
  428         page_size = PAGE_SIZE_64K;
  429 
  430         for (i = 0; i < GITS_BASER_NUM; i++) {
  431                 reg = gic_its_read_8(sc, GITS_BASER(i));
  432                 /* The type of table */
  433                 type = GITS_BASER_TYPE(reg);
  434                 /* The table entry size */
  435                 esize = GITS_BASER_ESIZE(reg);
  436 
  437                 switch(type) {
  438                 case GITS_BASER_TYPE_DEV:
  439                         nidents = (1 << devbits);
  440                         its_tbl_size = esize * nidents;
  441                         its_tbl_size = roundup2(its_tbl_size, PAGE_SIZE_64K);
  442                         break;
  443                 case GITS_BASER_TYPE_VP:
  444                 case GITS_BASER_TYPE_PP: /* Undocumented? */
  445                 case GITS_BASER_TYPE_IC:
  446                         its_tbl_size = page_size;
  447                         break;
  448                 default:
  449                         continue;
  450                 }
  451                 npages = howmany(its_tbl_size, PAGE_SIZE);
  452 
  453                 /* Allocate the table */
  454                 table = (vm_offset_t)contigmalloc(npages * PAGE_SIZE,
  455                     M_GICV3_ITS, M_WAITOK | M_ZERO, 0, (1ul << 48) - 1,
  456                     PAGE_SIZE, 0);
  457 
  458                 sc->sc_its_ptab[i].ptab_vaddr = table;
  459                 sc->sc_its_ptab[i].ptab_size = npages * PAGE_SIZE;
  460 
  461                 paddr = vtophys(table);
  462 
  463                 while (1) {
  464                         nitspages = howmany(its_tbl_size, page_size);
  465 
  466                         /* Clear the fields we will be setting */
  467                         reg &= ~(GITS_BASER_VALID |
  468                             GITS_BASER_CACHE_MASK | GITS_BASER_TYPE_MASK |
  469                             GITS_BASER_ESIZE_MASK | GITS_BASER_PA_MASK |
  470                             GITS_BASER_SHARE_MASK | GITS_BASER_PSZ_MASK |
  471                             GITS_BASER_SIZE_MASK);
  472                         /* Set the new values */
  473                         reg |= GITS_BASER_VALID |
  474                             (cache << GITS_BASER_CACHE_SHIFT) |
  475                             (type << GITS_BASER_TYPE_SHIFT) |
  476                             ((esize - 1) << GITS_BASER_ESIZE_SHIFT) |
  477                             paddr | (share << GITS_BASER_SHARE_SHIFT) |
  478                             (nitspages - 1);
  479 
  480                         switch (page_size) {
  481                         case PAGE_SIZE:         /* 4KB */
  482                                 reg |=
  483                                     GITS_BASER_PSZ_4K << GITS_BASER_PSZ_SHIFT;
  484                                 break;
  485                         case PAGE_SIZE_16K:     /* 16KB */
  486                                 reg |=
   487                                     GITS_BASER_PSZ_16K << GITS_BASER_PSZ_SHIFT;
  488                                 break;
  489                         case PAGE_SIZE_64K:     /* 64KB */
  490                                 reg |=
  491                                     GITS_BASER_PSZ_64K << GITS_BASER_PSZ_SHIFT;
  492                                 break;
  493                         }
  494 
  495                         gic_its_write_8(sc, GITS_BASER(i), reg);
  496 
  497                         /* Read back to check */
  498                         tmp = gic_its_read_8(sc, GITS_BASER(i));
  499 
   500                         /* Do the shareability masks line up? */
  501                         if ((tmp & GITS_BASER_SHARE_MASK) !=
  502                             (reg & GITS_BASER_SHARE_MASK)) {
  503                                 share = (tmp & GITS_BASER_SHARE_MASK) >>
  504                                     GITS_BASER_SHARE_SHIFT;
  505                                 continue;
  506                         }
  507 
  508                         if ((tmp & GITS_BASER_PSZ_MASK) !=
  509                             (reg & GITS_BASER_PSZ_MASK)) {
  510                                 switch (page_size) {
  511                                 case PAGE_SIZE_16K:
  512                                         page_size = PAGE_SIZE;
  513                                         continue;
  514                                 case PAGE_SIZE_64K:
  515                                         page_size = PAGE_SIZE_16K;
  516                                         continue;
  517                                 }
  518                         }
  519 
  520                         if (tmp != reg) {
  521                                 device_printf(dev, "GITS_BASER%d: "
  522                                     "unable to be updated: %lx != %lx\n",
  523                                     i, reg, tmp);
  524                                 return (ENXIO);
  525                         }
  526 
  527                         /* We should have made all needed changes */
  528                         break;
  529                 }
  530         }
  531 
  532         return (0);
  533 }
  534 
  535 static void
  536 gicv3_its_conftable_init(struct gicv3_its_softc *sc)
  537 {
  538 
  539         sc->sc_conf_base = (vm_offset_t)contigmalloc(LPI_CONFTAB_SIZE,
  540             M_GICV3_ITS, M_WAITOK, 0, LPI_CONFTAB_MAX_ADDR, LPI_CONFTAB_ALIGN,
  541             0);
  542 
  543         /* Set the default configuration */
  544         memset((void *)sc->sc_conf_base, GIC_PRIORITY_MAX | LPI_CONF_GROUP1,
  545             LPI_CONFTAB_SIZE);
  546 
  547         /* Flush the table to memory */
  548         cpu_dcache_wb_range(sc->sc_conf_base, LPI_CONFTAB_SIZE);
  549 }
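       /*
        * Added note: each LPI is described by a single byte in this table,
        * with the priority in the upper bits and LPI_CONF_ENABLE in bit 0.
        * gicv3_its_enable_intr() and gicv3_its_disable_intr() below flip
        * that bit and then issue an INV command so the hardware re-reads
        * the entry.
        */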
  550 
  551 static void
  552 gicv3_its_pendtables_init(struct gicv3_its_softc *sc)
  553 {
  554         int i;
  555 
  556         for (i = 0; i < mp_ncpus; i++) {
  557                 if (CPU_ISSET(i, &all_cpus) == 0)
  558                         continue;
  559 
  560                 sc->sc_pend_base[i] = (vm_offset_t)contigmalloc(
  561                     LPI_PENDTAB_SIZE, M_GICV3_ITS, M_WAITOK | M_ZERO,
  562                     0, LPI_PENDTAB_MAX_ADDR, LPI_PENDTAB_ALIGN, 0);
  563 
  564                 /* Flush so the ITS can see the memory */
   565                 cpu_dcache_wb_range(sc->sc_pend_base[i],
   566                     LPI_PENDTAB_SIZE);
  567         }
  568 }
  569 
  570 static int
  571 its_init_cpu(device_t dev, struct gicv3_its_softc *sc)
  572 {
  573         device_t gicv3;
  574         vm_paddr_t target;
  575         uint64_t xbaser, tmp;
  576         uint32_t ctlr;
  577         u_int cpuid;
  578 
  579         gicv3 = device_get_parent(dev);
  580         cpuid = PCPU_GET(cpuid);
  581 
  582         /* Check if the ITS is enabled on this CPU */
  583         if ((gic_r_read_4(gicv3, GICR_TYPER) & GICR_TYPER_PLPIS) == 0) {
  584                 return (ENXIO);
  585         }
  586 
  587         /* Disable LPIs */
  588         ctlr = gic_r_read_4(gicv3, GICR_CTLR);
  589         ctlr &= ~GICR_CTLR_LPI_ENABLE;
  590         gic_r_write_4(gicv3, GICR_CTLR, ctlr);
  591 
   592         /* Make sure changes are observable by the GIC */
  593         dsb(sy);
  594 
  595         /*
  596          * Set the redistributor base
  597          */
  598         xbaser = vtophys(sc->sc_conf_base) |
  599             (GICR_PROPBASER_SHARE_IS << GICR_PROPBASER_SHARE_SHIFT) |
  600             (GICR_PROPBASER_CACHE_NIWAWB << GICR_PROPBASER_CACHE_SHIFT) |
  601             (flsl(LPI_CONFTAB_SIZE | GIC_FIRST_LPI) - 1);
  602         gic_r_write_8(gicv3, GICR_PROPBASER, xbaser);
  603 
  604         /* Check the cache attributes we set */
  605         tmp = gic_r_read_8(gicv3, GICR_PROPBASER);
  606 
  607         if ((tmp & GICR_PROPBASER_SHARE_MASK) !=
  608             (xbaser & GICR_PROPBASER_SHARE_MASK)) {
  609                 if ((tmp & GICR_PROPBASER_SHARE_MASK) ==
  610                     (GICR_PROPBASER_SHARE_NS << GICR_PROPBASER_SHARE_SHIFT)) {
  611                         /* We need to mark as non-cacheable */
  612                         xbaser &= ~(GICR_PROPBASER_SHARE_MASK |
  613                             GICR_PROPBASER_CACHE_MASK);
  614                         /* Non-cacheable */
  615                         xbaser |= GICR_PROPBASER_CACHE_NIN <<
  616                             GICR_PROPBASER_CACHE_SHIFT;
   617                         /* Non-shareable */
  618                         xbaser |= GICR_PROPBASER_SHARE_NS <<
  619                             GICR_PROPBASER_SHARE_SHIFT;
  620                         gic_r_write_8(gicv3, GICR_PROPBASER, xbaser);
  621                 }
  622                 sc->sc_its_flags |= ITS_FLAGS_LPI_CONF_FLUSH;
  623         }
  624 
  625         /*
  626          * Set the LPI pending table base
  627          */
  628         xbaser = vtophys(sc->sc_pend_base[cpuid]) |
  629             (GICR_PENDBASER_CACHE_NIWAWB << GICR_PENDBASER_CACHE_SHIFT) |
  630             (GICR_PENDBASER_SHARE_IS << GICR_PENDBASER_SHARE_SHIFT);
  631 
  632         gic_r_write_8(gicv3, GICR_PENDBASER, xbaser);
  633 
  634         tmp = gic_r_read_8(gicv3, GICR_PENDBASER);
  635 
  636         if ((tmp & GICR_PENDBASER_SHARE_MASK) ==
  637             (GICR_PENDBASER_SHARE_NS << GICR_PENDBASER_SHARE_SHIFT)) {
   638                 /* Clear the cache and shareability bits */
  639                 xbaser &= ~(GICR_PENDBASER_CACHE_MASK |
  640                     GICR_PENDBASER_SHARE_MASK);
  641                 /* Mark as non-shareable */
  642                 xbaser |= GICR_PENDBASER_SHARE_NS << GICR_PENDBASER_SHARE_SHIFT;
  643                 /* And non-cacheable */
  644                 xbaser |= GICR_PENDBASER_CACHE_NIN <<
  645                     GICR_PENDBASER_CACHE_SHIFT;
  646         }
  647 
  648         /* Enable LPIs */
  649         ctlr = gic_r_read_4(gicv3, GICR_CTLR);
  650         ctlr |= GICR_CTLR_LPI_ENABLE;
  651         gic_r_write_4(gicv3, GICR_CTLR, ctlr);
  652 
  653         /* Make sure the GIC has seen everything */
  654         dsb(sy);
  655 
  656         if ((gic_its_read_8(sc, GITS_TYPER) & GITS_TYPER_PTA) != 0) {
  657                 /* This ITS wants the redistributor physical address */
  658                 target = vtophys(gicv3_get_redist_vaddr(dev));
  659         } else {
  660                 /* This ITS wants the unique processor number */
  661                 target = GICR_TYPER_CPUNUM(gic_r_read_8(gicv3, GICR_TYPER));
  662         }
  663 
  664         sc->sc_its_cols[cpuid]->col_target = target;
  665         sc->sc_its_cols[cpuid]->col_id = cpuid;
  666 
  667         its_cmd_mapc(dev, sc->sc_its_cols[cpuid], 1);
  668         its_cmd_invall(dev, sc->sc_its_cols[cpuid]);
  669 
  670         return (0);
  671 }
  672 
  673 static int
  674 gicv3_its_attach(device_t dev)
  675 {
  676         struct gicv3_its_softc *sc;
  677         const char *name;
  678         uint32_t iidr;
  679         int err, i, rid;
  680 
  681         sc = device_get_softc(dev);
  682 
  683         rid = 0;
  684         sc->sc_its_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
  685             RF_ACTIVE);
  686         if (sc->sc_its_res == NULL) {
  687                 device_printf(dev, "Could not allocate memory\n");
  688                 return (ENXIO);
  689         }
  690 
  691         iidr = gic_its_read_4(sc, GITS_IIDR);
  692         for (i = 0; i < nitems(its_quirks); i++) {
  693                 if ((iidr & its_quirks[i].iidr_mask) == its_quirks[i].iidr) {
  694                         if (bootverbose) {
  695                                 device_printf(dev, "Applying %s\n",
  696                                     its_quirks[i].desc);
  697                         }
  698                         its_quirks[i].func(dev);
  699                         break;
  700                 }
  701         }
  702 
  703         /* Allocate the private tables */
  704         err = gicv3_its_table_init(dev, sc);
  705         if (err != 0)
  706                 return (err);
  707 
  708         /* Protects access to the device list */
  709         mtx_init(&sc->sc_its_dev_lock, "ITS device lock", NULL, MTX_SPIN);
  710 
  711         /* Protects access to the ITS command circular buffer. */
  712         mtx_init(&sc->sc_its_cmd_lock, "ITS cmd lock", NULL, MTX_SPIN);
  713 
  714         /* Allocate the command circular buffer */
  715         gicv3_its_cmdq_init(sc);
  716 
  717         /* Allocate the per-CPU collections */
  718         for (int cpu = 0; cpu < mp_ncpus; cpu++)
  719                 if (CPU_ISSET(cpu, &all_cpus) != 0)
  720                         sc->sc_its_cols[cpu] = malloc(
  721                             sizeof(*sc->sc_its_cols[0]), M_GICV3_ITS,
  722                             M_WAITOK | M_ZERO);
  723                 else
  724                         sc->sc_its_cols[cpu] = NULL;
  725 
  726         /* Enable the ITS */
  727         gic_its_write_4(sc, GITS_CTLR,
  728             gic_its_read_4(sc, GITS_CTLR) | GITS_CTLR_EN);
  729 
  730         /* Create the LPI configuration table */
  731         gicv3_its_conftable_init(sc);
  732 
   733         /* And the pending tables */
  734         gicv3_its_pendtables_init(sc);
  735 
  736         /* Enable LPIs on this CPU */
  737         its_init_cpu(dev, sc);
  738 
  739         TAILQ_INIT(&sc->sc_its_dev_list);
  740 
  741         /*
  742          * Create the vmem object to allocate IRQs from. We try to use all
  743          * IRQs not already used by the GICv3.
  744          * XXX: This assumes there are no other interrupt controllers in the
  745          * system.
  746          */
  747         sc->sc_irq_alloc = vmem_create("GICv3 ITS IRQs", 0,
  748             NIRQ - gicv3_get_nirqs(dev), 1, 1, M_FIRSTFIT | M_WAITOK);
  749 
  750         sc->sc_irqs = malloc(sizeof(*sc->sc_irqs) * LPI_NIRQS, M_GICV3_ITS,
  751             M_WAITOK | M_ZERO);
  752         name = device_get_nameunit(dev);
  753         for (i = 0; i < LPI_NIRQS; i++) {
  754                 sc->sc_irqs[i].gi_irq = i;
  755                 err = intr_isrc_register(&sc->sc_irqs[i].gi_isrc, dev, 0,
  756                     "%s,%u", name, i);
  757         }
  758 
  759         return (0);
  760 }
  761 
  762 static int
  763 gicv3_its_detach(device_t dev)
  764 {
  765 
  766         return (ENXIO);
  767 }
  768 
  769 static void
  770 its_quirk_cavium_22375(device_t dev)
  771 {
  772         struct gicv3_its_softc *sc;
  773 
  774         sc = device_get_softc(dev);
  775         sc->sc_its_flags |= ITS_FLAGS_ERRATA_CAVIUM_22375;
  776 }
  777 
  778 static void
  779 gicv3_its_disable_intr(device_t dev, struct intr_irqsrc *isrc)
  780 {
  781         struct gicv3_its_softc *sc;
  782         struct gicv3_its_irqsrc *girq;
  783         uint8_t *conf;
  784 
  785         sc = device_get_softc(dev);
  786         girq = (struct gicv3_its_irqsrc *)isrc;
  787         conf = (uint8_t *)sc->sc_conf_base;
  788 
  789         conf[girq->gi_irq] &= ~LPI_CONF_ENABLE;
  790 
  791         if ((sc->sc_its_flags & ITS_FLAGS_LPI_CONF_FLUSH) != 0) {
  792                 /* Clean D-cache under command. */
  793                 cpu_dcache_wb_range((vm_offset_t)&conf[girq->gi_irq], 1);
  794         } else {
  795                 /* DSB inner shareable, store */
  796                 dsb(ishst);
  797         }
  798 
  799         its_cmd_inv(dev, girq->gi_its_dev, girq);
  800 }
  801 
  802 static void
  803 gicv3_its_enable_intr(device_t dev, struct intr_irqsrc *isrc)
  804 {
  805         struct gicv3_its_softc *sc;
  806         struct gicv3_its_irqsrc *girq;
  807         uint8_t *conf;
  808 
  809         sc = device_get_softc(dev);
  810         girq = (struct gicv3_its_irqsrc *)isrc;
  811         conf = (uint8_t *)sc->sc_conf_base;
  812 
  813         conf[girq->gi_irq] |= LPI_CONF_ENABLE;
  814 
  815         if ((sc->sc_its_flags & ITS_FLAGS_LPI_CONF_FLUSH) != 0) {
  816                 /* Clean D-cache under command. */
  817                 cpu_dcache_wb_range((vm_offset_t)&conf[girq->gi_irq], 1);
  818         } else {
  819                 /* DSB inner shareable, store */
  820                 dsb(ishst);
  821         }
  822 
  823         its_cmd_inv(dev, girq->gi_its_dev, girq);
  824 }
  825 
  826 static int
  827 gicv3_its_intr(void *arg, uintptr_t irq)
  828 {
  829         struct gicv3_its_softc *sc = arg;
  830         struct gicv3_its_irqsrc *girq;
  831         struct trapframe *tf;
  832 
  833         irq -= GIC_FIRST_LPI;
  834         girq = &sc->sc_irqs[irq];
  835         if (girq == NULL)
  836                 panic("gicv3_its_intr: Invalid interrupt %ld",
  837                     irq + GIC_FIRST_LPI);
  838 
  839         tf = curthread->td_intr_frame;
  840         intr_isrc_dispatch(&girq->gi_isrc, tf);
  841         return (FILTER_HANDLED);
  842 }
  843 
  844 static void
  845 gicv3_its_pre_ithread(device_t dev, struct intr_irqsrc *isrc)
  846 {
  847         struct gicv3_its_irqsrc *girq;
  848 
  849         girq = (struct gicv3_its_irqsrc *)isrc;
  850         gicv3_its_disable_intr(dev, isrc);
  851         gic_icc_write(EOIR1, girq->gi_irq + GIC_FIRST_LPI);
  852 }
  853 
  854 static void
  855 gicv3_its_post_ithread(device_t dev, struct intr_irqsrc *isrc)
  856 {
  857 
  858         gicv3_its_enable_intr(dev, isrc);
  859 }
  860 
  861 static void
  862 gicv3_its_post_filter(device_t dev, struct intr_irqsrc *isrc)
  863 {
  864         struct gicv3_its_irqsrc *girq;
  865 
  866         girq = (struct gicv3_its_irqsrc *)isrc;
  867         gic_icc_write(EOIR1, girq->gi_irq + GIC_FIRST_LPI);
  868 }
  869 
  870 static int
  871 gicv3_its_bind_intr(device_t dev, struct intr_irqsrc *isrc)
  872 {
  873         struct gicv3_its_irqsrc *girq;
  874 
  875         girq = (struct gicv3_its_irqsrc *)isrc;
  876         if (CPU_EMPTY(&isrc->isrc_cpu)) {
  877                 gic_irq_cpu = intr_irq_next_cpu(gic_irq_cpu, &all_cpus);
  878                 CPU_SETOF(gic_irq_cpu, &isrc->isrc_cpu);
  879         }
  880 
  881         its_cmd_movi(dev, girq);
  882 
  883         return (0);
  884 }
  885 
  886 static int
  887 gicv3_its_map_intr(device_t dev, struct intr_map_data *data,
  888     struct intr_irqsrc **isrcp)
  889 {
  890 
  891         /*
  892          * This should never happen, we only call this function to map
  893          * interrupts found before the controller driver is ready.
  894          */
  895         panic("gicv3_its_map_intr: Unable to map a MSI interrupt");
  896 }
  897 
  898 static int
  899 gicv3_its_setup_intr(device_t dev, struct intr_irqsrc *isrc,
  900     struct resource *res, struct intr_map_data *data)
  901 {
  902 
  903         /* Bind the interrupt to a CPU */
  904         gicv3_its_bind_intr(dev, isrc);
  905 
  906         return (0);
  907 }
  908 
  909 #ifdef SMP
  910 static void
  911 gicv3_its_init_secondary(device_t dev)
  912 {
  913         struct gicv3_its_softc *sc;
  914 
  915         sc = device_get_softc(dev);
  916 
  917         /*
  918          * This is fatal as otherwise we may bind interrupts to this CPU.
  919          * We need a way to tell the interrupt framework to only bind to a
  920          * subset of given CPUs when it performs the shuffle.
  921          */
  922         if (its_init_cpu(dev, sc) != 0)
  923                 panic("gicv3_its_init_secondary: No usable ITS on CPU%d",
  924                     PCPU_GET(cpuid));
  925 }
  926 #endif
  927 
  928 static uint32_t
  929 its_get_devid(device_t pci_dev)
  930 {
  931         uintptr_t id;
  932 
  933         if (pci_get_id(pci_dev, PCI_ID_MSI, &id) != 0)
  934                 panic("its_get_devid: Unable to get the MSI DeviceID");
  935 
  936         return (id);
  937 }
  938 
  939 static struct its_dev *
  940 its_device_find(device_t dev, device_t child)
  941 {
  942         struct gicv3_its_softc *sc;
  943         struct its_dev *its_dev = NULL;
  944 
  945         sc = device_get_softc(dev);
  946 
  947         mtx_lock_spin(&sc->sc_its_dev_lock);
  948         TAILQ_FOREACH(its_dev, &sc->sc_its_dev_list, entry) {
  949                 if (its_dev->pci_dev == child)
  950                         break;
  951         }
  952         mtx_unlock_spin(&sc->sc_its_dev_lock);
  953 
  954         return (its_dev);
  955 }
  956 
  957 static struct its_dev *
  958 its_device_get(device_t dev, device_t child, u_int nvecs)
  959 {
  960         struct gicv3_its_softc *sc;
  961         struct its_dev *its_dev;
  962         vmem_addr_t irq_base;
  963         size_t esize;
  964 
  965         sc = device_get_softc(dev);
  966 
  967         its_dev = its_device_find(dev, child);
  968         if (its_dev != NULL)
  969                 return (its_dev);
  970 
  971         its_dev = malloc(sizeof(*its_dev), M_GICV3_ITS, M_NOWAIT | M_ZERO);
  972         if (its_dev == NULL)
  973                 return (NULL);
  974 
  975         its_dev->pci_dev = child;
  976         its_dev->devid = its_get_devid(child);
  977 
  978         its_dev->lpis.lpi_busy = 0;
  979         its_dev->lpis.lpi_num = nvecs;
  980         its_dev->lpis.lpi_free = nvecs;
  981 
  982         if (vmem_alloc(sc->sc_irq_alloc, nvecs, M_FIRSTFIT | M_NOWAIT,
  983             &irq_base) != 0) {
  984                 free(its_dev, M_GICV3_ITS);
  985                 return (NULL);
  986         }
  987         its_dev->lpis.lpi_base = irq_base;
  988 
  989         /* Get ITT entry size */
  990         esize = GITS_TYPER_ITTES(gic_its_read_8(sc, GITS_TYPER));
  991 
  992         /*
  993          * Allocate ITT for this device.
   994          * The PA must be 256-byte aligned and cover at least two entries.
  995          */
  996         its_dev->itt_size = roundup2(MAX(nvecs, 2) * esize, 256);
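               /*
                * Worked example (added): with an 8-byte ITT entry size and
                * a device needing 5 vectors this is
                * roundup2(MAX(5, 2) * 8, 256) = 256 bytes.
                */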
  997         its_dev->itt = (vm_offset_t)contigmalloc(its_dev->itt_size,
  998             M_GICV3_ITS, M_NOWAIT | M_ZERO, 0, LPI_INT_TRANS_TAB_MAX_ADDR,
  999             LPI_INT_TRANS_TAB_ALIGN, 0);
 1000         if (its_dev->itt == 0) {
 1001                 vmem_free(sc->sc_irq_alloc, its_dev->lpis.lpi_base, nvecs);
 1002                 free(its_dev, M_GICV3_ITS);
 1003                 return (NULL);
 1004         }
 1005 
 1006         mtx_lock_spin(&sc->sc_its_dev_lock);
 1007         TAILQ_INSERT_TAIL(&sc->sc_its_dev_list, its_dev, entry);
 1008         mtx_unlock_spin(&sc->sc_its_dev_lock);
 1009 
 1010         /* Map device to its ITT */
 1011         its_cmd_mapd(dev, its_dev, 1);
 1012 
 1013         return (its_dev);
 1014 }
 1015 
 1016 static void
 1017 its_device_release(device_t dev, struct its_dev *its_dev)
 1018 {
 1019         struct gicv3_its_softc *sc;
 1020 
 1021         KASSERT(its_dev->lpis.lpi_busy == 0,
 1022             ("its_device_release: Trying to release an inuse ITS device"));
 1023 
 1024         /* Unmap device in ITS */
 1025         its_cmd_mapd(dev, its_dev, 0);
 1026 
 1027         sc = device_get_softc(dev);
 1028 
 1029         /* Remove the device from the list of devices */
 1030         mtx_lock_spin(&sc->sc_its_dev_lock);
 1031         TAILQ_REMOVE(&sc->sc_its_dev_list, its_dev, entry);
 1032         mtx_unlock_spin(&sc->sc_its_dev_lock);
 1033 
 1034         /* Free ITT */
 1035         KASSERT(its_dev->itt != 0, ("Invalid ITT in valid ITS device"));
 1036         contigfree((void *)its_dev->itt, its_dev->itt_size, M_GICV3_ITS);
 1037 
 1038         /* Free the IRQ allocation */
 1039         vmem_free(sc->sc_irq_alloc, its_dev->lpis.lpi_base,
 1040             its_dev->lpis.lpi_num);
 1041 
 1042         free(its_dev, M_GICV3_ITS);
 1043 }
 1044 
 1045 static int
 1046 gicv3_its_alloc_msi(device_t dev, device_t child, int count, int maxcount,
 1047     device_t *pic, struct intr_irqsrc **srcs)
 1048 {
 1049         struct gicv3_its_softc *sc;
 1050         struct gicv3_its_irqsrc *girq;
 1051         struct its_dev *its_dev;
 1052         u_int irq;
 1053         int i;
 1054 
 1055         its_dev = its_device_get(dev, child, count);
 1056         if (its_dev == NULL)
 1057                 return (ENXIO);
 1058 
 1059         KASSERT(its_dev->lpis.lpi_free >= count,
 1060             ("gicv3_its_alloc_msi: No free LPIs"));
 1061         sc = device_get_softc(dev);
 1062         irq = its_dev->lpis.lpi_base + its_dev->lpis.lpi_num -
 1063             its_dev->lpis.lpi_free;
 1064         for (i = 0; i < count; i++, irq++) {
 1065                 its_dev->lpis.lpi_free--;
 1066                 girq = &sc->sc_irqs[irq];
 1067                 girq->gi_its_dev = its_dev;
 1068                 srcs[i] = (struct intr_irqsrc *)girq;
 1069         }
 1070         its_dev->lpis.lpi_busy += count;
 1071         *pic = dev;
 1072 
 1073         return (0);
 1074 }
 1075 
 1076 static int
 1077 gicv3_its_release_msi(device_t dev, device_t child, int count,
 1078     struct intr_irqsrc **isrc)
 1079 {
 1080         struct gicv3_its_softc *sc;
 1081         struct gicv3_its_irqsrc *girq;
 1082         struct its_dev *its_dev;
 1083         int i;
 1084 
 1085         sc = device_get_softc(dev);
 1086         its_dev = its_device_find(dev, child);
 1087 
 1088         KASSERT(its_dev != NULL,
 1089             ("gicv3_its_release_msi: Releasing a MSI interrupt with "
 1090              "no ITS device"));
 1091         KASSERT(its_dev->lpis.lpi_busy >= count,
 1092             ("gicv3_its_release_msi: Releasing more interrupts than "
 1093              "were allocated: releasing %d, allocated %d", count,
 1094              its_dev->lpis.lpi_busy));
 1095         for (i = 0; i < count; i++) {
 1096                 girq = (struct gicv3_its_irqsrc *)isrc[i];
 1097                 girq->gi_its_dev = NULL;
 1098         }
 1099         its_dev->lpis.lpi_busy -= count;
 1100 
 1101         if (its_dev->lpis.lpi_busy == 0)
 1102                 its_device_release(dev, its_dev);
 1103 
 1104         return (0);
 1105 }
 1106 
 1107 static int
 1108 gicv3_its_alloc_msix(device_t dev, device_t child, device_t *pic,
 1109     struct intr_irqsrc **isrcp)
 1110 {
 1111         struct gicv3_its_softc *sc;
 1112         struct gicv3_its_irqsrc *girq;
 1113         struct its_dev *its_dev;
 1114         u_int nvecs, irq;
 1115 
 1116         nvecs = pci_msix_count(child);
 1117         its_dev = its_device_get(dev, child, nvecs);
 1118         if (its_dev == NULL)
 1119                 return (ENXIO);
 1120 
 1121         KASSERT(its_dev->lpis.lpi_free > 0,
 1122             ("gicv3_its_alloc_msix: No free LPIs"));
 1123         sc = device_get_softc(dev);
 1124         irq = its_dev->lpis.lpi_base + its_dev->lpis.lpi_num -
 1125             its_dev->lpis.lpi_free;
 1126         its_dev->lpis.lpi_free--;
 1127         its_dev->lpis.lpi_busy++;
 1128         girq = &sc->sc_irqs[irq];
 1129         girq->gi_its_dev = its_dev;
 1130 
 1131         *pic = dev;
 1132         *isrcp = (struct intr_irqsrc *)girq;
 1133 
 1134         return (0);
 1135 }
 1136 
 1137 static int
 1138 gicv3_its_release_msix(device_t dev, device_t child, struct intr_irqsrc *isrc)
 1139 {
 1140         struct gicv3_its_softc *sc;
 1141         struct gicv3_its_irqsrc *girq;
 1142         struct its_dev *its_dev;
 1143 
 1144         sc = device_get_softc(dev);
 1145         its_dev = its_device_find(dev, child);
 1146 
 1147         KASSERT(its_dev != NULL,
 1148             ("gicv3_its_release_msix: Releasing a MSI-X interrupt with "
 1149              "no ITS device"));
 1150         KASSERT(its_dev->lpis.lpi_busy > 0,
 1151             ("gicv3_its_release_msix: Releasing more interrupts than "
 1152              "were allocated: allocated %d", its_dev->lpis.lpi_busy));
 1153         girq = (struct gicv3_its_irqsrc *)isrc;
 1154         girq->gi_its_dev = NULL;
 1155         its_dev->lpis.lpi_busy--;
 1156 
 1157         if (its_dev->lpis.lpi_busy == 0)
 1158                 its_device_release(dev, its_dev);
 1159 
 1160         return (0);
 1161 }
 1162 
 1163 static int
 1164 gicv3_its_map_msi(device_t dev, device_t child, struct intr_irqsrc *isrc,
 1165     uint64_t *addr, uint32_t *data)
 1166 {
 1167         struct gicv3_its_softc *sc;
 1168         struct gicv3_its_irqsrc *girq;
 1169 
 1170         sc = device_get_softc(dev);
 1171         girq = (struct gicv3_its_irqsrc *)isrc;
 1172 
 1173         /* Map the message to the given IRQ */
 1174         its_cmd_mapti(dev, girq);
 1175 
 1176         *addr = vtophys(rman_get_virtual(sc->sc_its_res)) + GITS_TRANSLATER;
 1177         *data = girq->gi_irq - girq->gi_its_dev->lpis.lpi_base;
 1178 
 1179         return (0);
 1180 }
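       /*
        * Added note: the address/data pair returned above follows the ITS
        * programming model: the endpoint writes the data value (the
        * EventID, i.e. the vector's offset within the device's LPI chunk)
        * to the GITS_TRANSLATER register, and the ITS combines the
        * originating DeviceID with that EventID to look up the physical
        * LPI and target collection through the ITT mapped by MAPTI above.
        */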
 1181 
 1182 /*
 1183  * Commands handling.
 1184  */
 1185 
 1186 static __inline void
 1187 cmd_format_command(struct its_cmd *cmd, uint8_t cmd_type)
 1188 {
 1189         /* Command field: DW0 [7:0] */
 1190         cmd->cmd_dword[0] &= htole64(~CMD_COMMAND_MASK);
 1191         cmd->cmd_dword[0] |= htole64(cmd_type);
 1192 }
 1193 
 1194 static __inline void
 1195 cmd_format_devid(struct its_cmd *cmd, uint32_t devid)
 1196 {
 1197         /* Device ID field: DW0 [63:32] */
 1198         cmd->cmd_dword[0] &= htole64(~CMD_DEVID_MASK);
 1199         cmd->cmd_dword[0] |= htole64((uint64_t)devid << CMD_DEVID_SHIFT);
 1200 }
 1201 
 1202 static __inline void
 1203 cmd_format_size(struct its_cmd *cmd, uint16_t size)
 1204 {
 1205         /* Size field: DW1 [4:0] */
 1206         cmd->cmd_dword[1] &= htole64(~CMD_SIZE_MASK);
 1207         cmd->cmd_dword[1] |= htole64((size & CMD_SIZE_MASK));
 1208 }
 1209 
 1210 static __inline void
 1211 cmd_format_id(struct its_cmd *cmd, uint32_t id)
 1212 {
 1213         /* ID field: DW1 [31:0] */
 1214         cmd->cmd_dword[1] &= htole64(~CMD_ID_MASK);
 1215         cmd->cmd_dword[1] |= htole64(id);
 1216 }
 1217 
 1218 static __inline void
 1219 cmd_format_pid(struct its_cmd *cmd, uint32_t pid)
 1220 {
 1221         /* Physical ID field: DW1 [63:32] */
 1222         cmd->cmd_dword[1] &= htole64(~CMD_PID_MASK);
 1223         cmd->cmd_dword[1] |= htole64((uint64_t)pid << CMD_PID_SHIFT);
 1224 }
 1225 
 1226 static __inline void
 1227 cmd_format_col(struct its_cmd *cmd, uint16_t col_id)
 1228 {
  1229         /* Collection field: DW2 [15:0] */
 1230         cmd->cmd_dword[2] &= htole64(~CMD_COL_MASK);
 1231         cmd->cmd_dword[2] |= htole64(col_id);
 1232 }
 1233 
 1234 static __inline void
 1235 cmd_format_target(struct its_cmd *cmd, uint64_t target)
 1236 {
 1237         /* Target Address field: DW2 [47:16] */
 1238         cmd->cmd_dword[2] &= htole64(~CMD_TARGET_MASK);
 1239         cmd->cmd_dword[2] |= htole64(target & CMD_TARGET_MASK);
 1240 }
 1241 
 1242 static __inline void
 1243 cmd_format_itt(struct its_cmd *cmd, uint64_t itt)
 1244 {
 1245         /* ITT Address field: DW2 [47:8] */
 1246         cmd->cmd_dword[2] &= htole64(~CMD_ITT_MASK);
 1247         cmd->cmd_dword[2] |= htole64(itt & CMD_ITT_MASK);
 1248 }
 1249 
 1250 static __inline void
 1251 cmd_format_valid(struct its_cmd *cmd, uint8_t valid)
 1252 {
 1253         /* Valid field: DW2 [63] */
 1254         cmd->cmd_dword[2] &= htole64(~CMD_VALID_MASK);
 1255         cmd->cmd_dword[2] |= htole64((uint64_t)valid << CMD_VALID_SHIFT);
 1256 }
 1257 
 1258 static inline bool
 1259 its_cmd_queue_full(struct gicv3_its_softc *sc)
 1260 {
 1261         size_t read_idx, next_write_idx;
 1262 
 1263         /* Get the index of the next command */
 1264         next_write_idx = (sc->sc_its_cmd_next_idx + 1) %
 1265             (ITS_CMDQ_SIZE / sizeof(struct its_cmd));
 1266         /* And the index of the current command being read */
 1267         read_idx = gic_its_read_4(sc, GITS_CREADR) / sizeof(struct its_cmd);
 1268 
 1269         /*
 1270          * The queue is full when the write offset points
 1271          * at the command before the current read offset.
 1272          */
 1273         return (next_write_idx == read_idx);
 1274 }
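       /*
        * Added example: a 64KB queue of 32-byte commands has 2048 slots;
        * the ring is reported full when advancing the write index would
        * land on the slot GITS_CREADR still points at, which deliberately
        * leaves one slot unused so that "full" can be told apart from
        * "empty".
        */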
 1275 
 1276 static inline void
 1277 its_cmd_sync(struct gicv3_its_softc *sc, struct its_cmd *cmd)
 1278 {
 1279 
 1280         if ((sc->sc_its_flags & ITS_FLAGS_CMDQ_FLUSH) != 0) {
 1281                 /* Clean D-cache under command. */
 1282                 cpu_dcache_wb_range((vm_offset_t)cmd, sizeof(*cmd));
 1283         } else {
 1284                 /* DSB inner shareable, store */
 1285                 dsb(ishst);
 1286         }
 1287 
 1288 }
 1289 
 1290 static inline uint64_t
 1291 its_cmd_cwriter_offset(struct gicv3_its_softc *sc, struct its_cmd *cmd)
 1292 {
 1293         uint64_t off;
 1294 
 1295         off = (cmd - sc->sc_its_cmd_base) * sizeof(*cmd);
 1296 
 1297         return (off);
 1298 }
 1299 
 1300 static void
 1301 its_cmd_wait_completion(device_t dev, struct its_cmd *cmd_first,
 1302     struct its_cmd *cmd_last)
 1303 {
 1304         struct gicv3_its_softc *sc;
 1305         uint64_t first, last, read;
 1306         size_t us_left;
 1307 
 1308         sc = device_get_softc(dev);
 1309 
 1310         /*
 1311          * XXX ARM64TODO: This is obviously a significant delay.
 1312          * The reason for that is that currently the time frames for
 1313          * the command to complete are not known.
 1314          */
 1315         us_left = 1000000;
 1316 
 1317         first = its_cmd_cwriter_offset(sc, cmd_first);
 1318         last = its_cmd_cwriter_offset(sc, cmd_last);
 1319 
 1320         for (;;) {
 1321                 read = gic_its_read_8(sc, GITS_CREADR);
 1322                 if (first < last) {
 1323                         if (read < first || read >= last)
 1324                                 break;
 1325                 } else if (read < first && read >= last)
 1326                         break;
 1327 
 1328                 if (us_left-- == 0) {
 1329                         /* This means timeout */
 1330                         device_printf(dev,
 1331                             "Timeout while waiting for CMD completion.\n");
 1332                         return;
 1333                 }
 1334                 DELAY(1);
 1335         }
 1336 }
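       /*
        * Added note: completion is detected when GITS_CREADR leaves the
        * half-open window [first, last) of the commands just queued; the
        * second branch above handles the case where that window wraps
        * around the end of the circular buffer.
        */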
 1337 
 1338 
 1339 static struct its_cmd *
 1340 its_cmd_alloc_locked(device_t dev)
 1341 {
 1342         struct gicv3_its_softc *sc;
 1343         struct its_cmd *cmd;
 1344         size_t us_left;
 1345 
 1346         sc = device_get_softc(dev);
 1347 
 1348         /*
 1349          * XXX ARM64TODO: This is obviously a significant delay.
 1350          * The reason for that is that currently the time frames for
 1351          * the command to complete (and therefore free the descriptor)
 1352          * are not known.
 1353          */
 1354         us_left = 1000000;
 1355 
 1356         mtx_assert(&sc->sc_its_cmd_lock, MA_OWNED);
 1357         while (its_cmd_queue_full(sc)) {
 1358                 if (us_left-- == 0) {
 1359                         /* Timeout while waiting for free command */
 1360                         device_printf(dev,
 1361                             "Timeout while waiting for free command\n");
 1362                         return (NULL);
 1363                 }
 1364                 DELAY(1);
 1365         }
 1366 
 1367         cmd = &sc->sc_its_cmd_base[sc->sc_its_cmd_next_idx];
 1368         sc->sc_its_cmd_next_idx++;
 1369         sc->sc_its_cmd_next_idx %= ITS_CMDQ_SIZE / sizeof(struct its_cmd);
 1370 
 1371         return (cmd);
 1372 }
 1373 
 1374 static uint64_t
 1375 its_cmd_prepare(struct its_cmd *cmd, struct its_cmd_desc *desc)
 1376 {
 1377         uint64_t target;
 1378         uint8_t cmd_type;
 1379         u_int size;
 1380         boolean_t error;
 1381 
 1382         error = FALSE;
 1383         cmd_type = desc->cmd_type;
 1384         target = ITS_TARGET_NONE;
 1385 
 1386         switch (cmd_type) {
 1387         case ITS_CMD_MOVI:      /* Move interrupt ID to another collection */
 1388                 target = desc->cmd_desc_movi.col->col_target;
 1389                 cmd_format_command(cmd, ITS_CMD_MOVI);
 1390                 cmd_format_id(cmd, desc->cmd_desc_movi.id);
 1391                 cmd_format_col(cmd, desc->cmd_desc_movi.col->col_id);
 1392                 cmd_format_devid(cmd, desc->cmd_desc_movi.its_dev->devid);
 1393                 break;
 1394         case ITS_CMD_SYNC:      /* Wait for previous commands completion */
 1395                 target = desc->cmd_desc_sync.col->col_target;
 1396                 cmd_format_command(cmd, ITS_CMD_SYNC);
 1397                 cmd_format_target(cmd, target);
 1398                 break;
 1399         case ITS_CMD_MAPD:      /* Assign ITT to device */
 1400                 cmd_format_command(cmd, ITS_CMD_MAPD);
 1401                 cmd_format_itt(cmd, vtophys(desc->cmd_desc_mapd.its_dev->itt));
 1402                 /*
 1403                  * Size describes number of bits to encode interrupt IDs
 1404                  * supported by the device minus one.
 1405                  * When V (valid) bit is zero, this field should be written
 1406                  * as zero.
 1407                  */
 1408                 if (desc->cmd_desc_mapd.valid != 0) {
 1409                         size = fls(desc->cmd_desc_mapd.its_dev->lpis.lpi_num);
 1410                         size = MAX(1, size) - 1;
 1411                 } else
 1412                         size = 0;
 1413 
 1414                 cmd_format_size(cmd, size);
 1415                 cmd_format_devid(cmd, desc->cmd_desc_mapd.its_dev->devid);
 1416                 cmd_format_valid(cmd, desc->cmd_desc_mapd.valid);
 1417                 break;
 1418         case ITS_CMD_MAPC:      /* Map collection to Re-Distributor */
 1419                 target = desc->cmd_desc_mapc.col->col_target;
 1420                 cmd_format_command(cmd, ITS_CMD_MAPC);
 1421                 cmd_format_col(cmd, desc->cmd_desc_mapc.col->col_id);
 1422                 cmd_format_valid(cmd, desc->cmd_desc_mapc.valid);
 1423                 cmd_format_target(cmd, target);
 1424                 break;
 1425         case ITS_CMD_MAPTI:
 1426                 target = desc->cmd_desc_mapvi.col->col_target;
 1427                 cmd_format_command(cmd, ITS_CMD_MAPTI);
 1428                 cmd_format_devid(cmd, desc->cmd_desc_mapvi.its_dev->devid);
 1429                 cmd_format_id(cmd, desc->cmd_desc_mapvi.id);
 1430                 cmd_format_pid(cmd, desc->cmd_desc_mapvi.pid);
 1431                 cmd_format_col(cmd, desc->cmd_desc_mapvi.col->col_id);
 1432                 break;
 1433         case ITS_CMD_MAPI:      /* Map interrupt with EventID equal to its physical LPI ID */
 1434                 target = desc->cmd_desc_mapi.col->col_target;
 1435                 cmd_format_command(cmd, ITS_CMD_MAPI);
 1436                 cmd_format_devid(cmd, desc->cmd_desc_mapi.its_dev->devid);
 1437                 cmd_format_id(cmd, desc->cmd_desc_mapi.pid);
 1438                 cmd_format_col(cmd, desc->cmd_desc_mapi.col->col_id);
 1439                 break;
 1440         case ITS_CMD_INV:       /* Reload the configuration of one LPI */
 1441                 target = desc->cmd_desc_inv.col->col_target;
 1442                 cmd_format_command(cmd, ITS_CMD_INV);
 1443                 cmd_format_devid(cmd, desc->cmd_desc_inv.its_dev->devid);
 1444                 cmd_format_id(cmd, desc->cmd_desc_inv.pid);
 1445                 break;
 1446         case ITS_CMD_INVALL:    /* Reload the configuration of all LPIs in a collection */
 1447                 cmd_format_command(cmd, ITS_CMD_INVALL);
 1448                 cmd_format_col(cmd, desc->cmd_desc_invall.col->col_id);
 1449                 break;
 1450         default:
 1451                 panic("its_cmd_prepare: Invalid command: %x", cmd_type);
 1452         }
 1453 
 1454         return (target);
 1455 }
 1456 
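      /*
       * Enqueue a single ITS command: allocate a slot in the command
       * queue, encode the command and make it visible to the ITS,
       * optionally follow it with an ITS SYNC command for the target
       * Redistributor, then advance GITS_CWRITER and wait for the ITS
       * to consume the queued commands.
       */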
 1457 static int
 1458 its_cmd_send(device_t dev, struct its_cmd_desc *desc)
 1459 {
 1460         struct gicv3_its_softc *sc;
 1461         struct its_cmd *cmd, *cmd_sync, *cmd_write;
 1462         struct its_col col_sync;
 1463         struct its_cmd_desc desc_sync;
 1464         uint64_t target, cwriter;
 1465 
 1466         sc = device_get_softc(dev);
 1467         mtx_lock_spin(&sc->sc_its_cmd_lock);
 1468         cmd = its_cmd_alloc_locked(dev);
 1469         if (cmd == NULL) {
 1470                 device_printf(dev, "could not allocate ITS command\n");
 1471                 mtx_unlock_spin(&sc->sc_its_cmd_lock);
 1472                 return (EBUSY);
 1473         }
 1474 
 1475         target = its_cmd_prepare(cmd, desc);
 1476         its_cmd_sync(sc, cmd);
 1477 
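              /*
               * Follow the command with an ITS SYNC for the same
               * Redistributor; once the SYNC is consumed the effects of
               * the preceding command are guaranteed to have taken effect.
               */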
 1478         if (target != ITS_TARGET_NONE) {
 1479                 cmd_sync = its_cmd_alloc_locked(dev);
 1480                 if (cmd_sync != NULL) {
 1481                         desc_sync.cmd_type = ITS_CMD_SYNC;
 1482                         col_sync.col_target = target;
 1483                         desc_sync.cmd_desc_sync.col = &col_sync;
 1484                         its_cmd_prepare(cmd_sync, &desc_sync);
 1485                         its_cmd_sync(sc, cmd_sync);
 1486                 }
 1487         }
 1488 
 1489         /* Update GITS_CWRITER to publish the new commands to the ITS */
 1490         cwriter = sc->sc_its_cmd_next_idx * sizeof(struct its_cmd);
 1491         gic_its_write_8(sc, GITS_CWRITER, cwriter);
 1492         cmd_write = &sc->sc_its_cmd_base[sc->sc_its_cmd_next_idx];
 1493         mtx_unlock_spin(&sc->sc_its_cmd_lock);
 1494 
 1495         its_cmd_wait_completion(dev, cmd, cmd_write);
 1496 
 1497         return (0);
 1498 }
 1499 
 1500 /* Handlers to send commands */
 1501 static void
 1502 its_cmd_movi(device_t dev, struct gicv3_its_irqsrc *girq)
 1503 {
 1504         struct gicv3_its_softc *sc;
 1505         struct its_cmd_desc desc;
 1506         struct its_col *col;
 1507 
 1508         sc = device_get_softc(dev);
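              /* Route to the collection of the first CPU in the interrupt's CPU set. */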
 1509         col = sc->sc_its_cols[CPU_FFS(&girq->gi_isrc.isrc_cpu) - 1];
 1510 
 1511         desc.cmd_type = ITS_CMD_MOVI;
 1512         desc.cmd_desc_movi.its_dev = girq->gi_its_dev;
 1513         desc.cmd_desc_movi.col = col;
 1514         desc.cmd_desc_movi.id = girq->gi_irq - girq->gi_its_dev->lpis.lpi_base;
 1515 
 1516         its_cmd_send(dev, &desc);
 1517 }
 1518 
 1519 static void
 1520 its_cmd_mapc(device_t dev, struct its_col *col, uint8_t valid)
 1521 {
 1522         struct its_cmd_desc desc;
 1523 
 1524         desc.cmd_type = ITS_CMD_MAPC;
 1525         desc.cmd_desc_mapc.col = col;
 1526         /*
 1527          * Valid bit set - map the collection.
 1528          * Valid bit cleared - unmap the collection.
 1529          */
 1530         desc.cmd_desc_mapc.valid = valid;
 1531 
 1532         its_cmd_send(dev, &desc);
 1533 }
 1534 
 1535 static void
 1536 its_cmd_mapti(device_t dev, struct gicv3_its_irqsrc *girq)
 1537 {
 1538         struct gicv3_its_softc *sc;
 1539         struct its_cmd_desc desc;
 1540         struct its_col *col;
 1541         u_int col_id;
 1542 
 1543         sc = device_get_softc(dev);
 1544 
 1545         col_id = CPU_FFS(&girq->gi_isrc.isrc_cpu) - 1;
 1546         col = sc->sc_its_cols[col_id];
 1547 
 1548         desc.cmd_type = ITS_CMD_MAPTI;
 1549         desc.cmd_desc_mapvi.its_dev = girq->gi_its_dev;
 1550         desc.cmd_desc_mapvi.col = col;
 1551         /* The EventID sent to the device */
 1552         desc.cmd_desc_mapvi.id = girq->gi_irq - girq->gi_its_dev->lpis.lpi_base;
 1553         /* The physical interrupt presented to software */
 1554         desc.cmd_desc_mapvi.pid = girq->gi_irq + GIC_FIRST_LPI;
 1555 
 1556         its_cmd_send(dev, &desc);
 1557 }
 1558 
 1559 static void
 1560 its_cmd_mapd(device_t dev, struct its_dev *its_dev, uint8_t valid)
 1561 {
 1562         struct its_cmd_desc desc;
 1563 
 1564         desc.cmd_type = ITS_CMD_MAPD;
 1565         desc.cmd_desc_mapd.its_dev = its_dev;
 1566         desc.cmd_desc_mapd.valid = valid;
 1567 
 1568         its_cmd_send(dev, &desc);
 1569 }
 1570 
 1571 static void
 1572 its_cmd_inv(device_t dev, struct its_dev *its_dev,
 1573     struct gicv3_its_irqsrc *girq)
 1574 {
 1575         struct gicv3_its_softc *sc;
 1576         struct its_cmd_desc desc;
 1577         struct its_col *col;
 1578 
 1579         sc = device_get_softc(dev);
 1580         col = sc->sc_its_cols[CPU_FFS(&girq->gi_isrc.isrc_cpu) - 1];
 1581 
 1582         desc.cmd_type = ITS_CMD_INV;
 1583         /* The EventID sent to the device */
 1584         desc.cmd_desc_inv.pid = girq->gi_irq - its_dev->lpis.lpi_base;
 1585         desc.cmd_desc_inv.its_dev = its_dev;
 1586         desc.cmd_desc_inv.col = col;
 1587 
 1588         its_cmd_send(dev, &desc);
 1589 }
 1590 
 1591 static void
 1592 its_cmd_invall(device_t dev, struct its_col *col)
 1593 {
 1594         struct its_cmd_desc desc;
 1595 
 1596         desc.cmd_type = ITS_CMD_INVALL;
 1597         desc.cmd_desc_invall.col = col;
 1598 
 1599         its_cmd_send(dev, &desc);
 1600 }
 1601 
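      /* Glue code for probing and attaching the ITS from the flattened device tree. */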
 1602 #ifdef FDT
 1603 static device_probe_t gicv3_its_fdt_probe;
 1604 static device_attach_t gicv3_its_fdt_attach;
 1605 
 1606 static device_method_t gicv3_its_fdt_methods[] = {
 1607         /* Device interface */
 1608         DEVMETHOD(device_probe,         gicv3_its_fdt_probe),
 1609         DEVMETHOD(device_attach,        gicv3_its_fdt_attach),
 1610 
 1611         /* End */
 1612         DEVMETHOD_END
 1613 };
 1614 
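      /*
       * DEFINE_CLASS_1() generates an "its_baseclasses" symbol; rename it
       * here so that another bus front end reusing the "its" class name
       * does not clash with it.
       */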
 1615 #define its_baseclasses its_fdt_baseclasses
 1616 DEFINE_CLASS_1(its, gicv3_its_fdt_driver, gicv3_its_fdt_methods,
 1617     sizeof(struct gicv3_its_softc), gicv3_its_driver);
 1618 #undef its_baseclasses
 1619 static devclass_t gicv3_its_fdt_devclass;
 1620 
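      /*
       * Register with the GICv3 ("gic") parent driver so the ITS attaches
       * early in boot, during the interrupt bus pass.
       */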
 1621 EARLY_DRIVER_MODULE(its, gic, gicv3_its_fdt_driver,
 1622     gicv3_its_fdt_devclass, 0, 0, BUS_PASS_INTERRUPT + BUS_PASS_ORDER_MIDDLE);
 1623 
 1624 static int
 1625 gicv3_its_fdt_probe(device_t dev)
 1626 {
 1627 
 1628         if (!ofw_bus_status_okay(dev))
 1629                 return (ENXIO);
 1630 
 1631         if (!ofw_bus_is_compatible(dev, "arm,gic-v3-its"))
 1632                 return (ENXIO);
 1633 
 1634         device_set_desc(dev, "ARM GIC Interrupt Translation Service");
 1635         return (BUS_PROBE_DEFAULT);
 1636 }
 1637 
 1638 static int
 1639 gicv3_its_fdt_attach(device_t dev)
 1640 {
 1641         struct gicv3_its_softc *sc;
 1642         phandle_t xref;
 1643         int err;
 1644 
 1645         err = gicv3_its_attach(dev);
 1646         if (err != 0)
 1647                 return (err);
 1648 
 1649         sc = device_get_softc(dev);
 1650 
 1651         /* Register this device as an interrupt controller */
 1652         xref = OF_xref_from_node(ofw_bus_get_node(dev));
 1653         sc->sc_pic = intr_pic_register(dev, xref);
 1654         intr_pic_add_handler(device_get_parent(dev), sc->sc_pic,
 1655             gicv3_its_intr, sc, GIC_FIRST_LPI, LPI_NIRQS);
 1656 
 1657         /* Register this device to handle MSI interrupts */
 1658         intr_msi_register(dev, xref);
 1659 
 1660         return (0);
 1661 }
 1662 #endif
