FreeBSD/Linux Kernel Cross Reference
sys/dev/vmware/vmci/vmci_defs.h


/*-
 * Copyright (c) 2018 VMware, Inc.
 *
 * SPDX-License-Identifier: (BSD-2-Clause OR GPL-2.0)
 *
 * $FreeBSD$
 */

#ifndef _VMCI_DEFS_H_
#define _VMCI_DEFS_H_

#include <sys/types.h>
#include <machine/atomic.h>

#include "vmci_kernel_defs.h"

#pragma GCC diagnostic ignored "-Wcast-qual"

/* Register offsets. */
#define VMCI_STATUS_ADDR                0x00
#define VMCI_CONTROL_ADDR               0x04
#define VMCI_ICR_ADDR                   0x08
#define VMCI_IMR_ADDR                   0x0c
#define VMCI_DATA_OUT_ADDR              0x10
#define VMCI_DATA_IN_ADDR               0x14
#define VMCI_CAPS_ADDR                  0x18
#define VMCI_RESULT_LOW_ADDR            0x1c
#define VMCI_RESULT_HIGH_ADDR           0x20

/* Status register bits. */
#define VMCI_STATUS_INT_ON              0x1

/* Control register bits. */
#define VMCI_CONTROL_RESET              0x1
#define VMCI_CONTROL_INT_ENABLE         0x2
#define VMCI_CONTROL_INT_DISABLE        0x4

/* Capabilities register bits. */
#define VMCI_CAPS_HYPERCALL             0x1
#define VMCI_CAPS_GUESTCALL             0x2
#define VMCI_CAPS_DATAGRAM              0x4
#define VMCI_CAPS_NOTIFICATIONS         0x8
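
/*
 * Example (illustrative sketch, not part of the original header, kept
 * under #if 0 so it does not affect compilation): a minimal device
 * bring-up using the registers above. The vmci_read_reg()/
 * vmci_write_reg() helpers are hypothetical stand-ins for the driver's
 * BAR accessors; the status codes used are defined later in this file.
 */
#if 0
static int
vmci_example_init(void)
{
        uint32_t caps;

        /* Reset the device, then verify that it supports datagrams. */
        vmci_write_reg(VMCI_CONTROL_ADDR, VMCI_CONTROL_RESET);
        caps = vmci_read_reg(VMCI_CAPS_ADDR);
        if ((caps & VMCI_CAPS_DATAGRAM) == 0)
                return (VMCI_ERROR_UNAVAILABLE);

        /* Unmask both interrupt causes and enable interrupt delivery. */
        vmci_write_reg(VMCI_IMR_ADDR,
            VMCI_IMR_DATAGRAM | VMCI_IMR_NOTIFICATION);
        vmci_write_reg(VMCI_CONTROL_ADDR, VMCI_CONTROL_INT_ENABLE);
        return (VMCI_SUCCESS);
}
#endif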

/* Interrupt Cause register bits. */
#define VMCI_ICR_DATAGRAM               0x1
#define VMCI_ICR_NOTIFICATION           0x2

/* Interrupt Mask register bits. */
#define VMCI_IMR_DATAGRAM               0x1
#define VMCI_IMR_NOTIFICATION           0x2

/* Interrupt type. */
typedef enum vmci_intr_type {
        VMCI_INTR_TYPE_INTX =   0,
        VMCI_INTR_TYPE_MSI =    1,
        VMCI_INTR_TYPE_MSIX =   2
} vmci_intr_type;

/*
 * Maximum MSI/MSI-X interrupt vectors in the device.
 */
#define VMCI_MAX_INTRS                  2

/*
 * Supported interrupt vectors. There is one for each ICR value above,
 * but here they indicate the position in the vector array/message ID.
 */
#define VMCI_INTR_DATAGRAM              0
#define VMCI_INTR_NOTIFICATION          1
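
/*
 * Example (illustrative sketch under #if 0): dispatching the two
 * interrupt causes when a single INTx/MSI vector is shared. With MSI-X,
 * VMCI_INTR_DATAGRAM and VMCI_INTR_NOTIFICATION above give each cause
 * its own vector instead. All helper names here are hypothetical.
 */
#if 0
static void
vmci_example_intr(void)
{
        uint32_t icr;

        /* Read the pending interrupt causes from ICR. */
        icr = vmci_read_reg(VMCI_ICR_ADDR);
        if (icr & VMCI_ICR_DATAGRAM)
                vmci_example_drain_datagrams();
        if (icr & VMCI_ICR_NOTIFICATION)
                vmci_example_scan_notification_bitmap();
}
#endif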

/*
 * A single VMCI device has an upper limit of 128 MiB on the amount of
 * memory that can be used for queue pairs.
 */
#define VMCI_MAX_GUEST_QP_MEMORY        (128 * 1024 * 1024)

/*
 * We have a fixed set of resource IDs available in the VMX.
 * This allows us to have a very simple implementation since we statically
 * know how many will create datagram handles. If a new caller arrives and
 * we have run out of slots we can manually increment the maximum size of
 * available resource IDs.
 */

typedef uint32_t vmci_resource;

/* VMCI reserved hypervisor datagram resource IDs. */
#define VMCI_RESOURCES_QUERY            0
#define VMCI_GET_CONTEXT_ID             1
#define VMCI_SET_NOTIFY_BITMAP          2
#define VMCI_DOORBELL_LINK              3
#define VMCI_DOORBELL_UNLINK            4
#define VMCI_DOORBELL_NOTIFY            5
/*
 * VMCI_DATAGRAM_REQUEST_MAP and VMCI_DATAGRAM_REMOVE_MAP are
 * obsoleted by the removal of VM to VM communication.
 */
#define VMCI_DATAGRAM_REQUEST_MAP       6
#define VMCI_DATAGRAM_REMOVE_MAP        7
#define VMCI_EVENT_SUBSCRIBE            8
#define VMCI_EVENT_UNSUBSCRIBE          9
#define VMCI_QUEUEPAIR_ALLOC            10
#define VMCI_QUEUEPAIR_DETACH           11
/*
 * VMCI_VSOCK_VMX_LOOKUP was assigned to 12 for Fusion 3.0/3.1,
 * WS 7.0/7.1 and ESX 4.1
 */
#define VMCI_HGFS_TRANSPORT             13
#define VMCI_UNITY_PBRPC_REGISTER       14
/*
 * This resource is used for VMCI socket control packets sent to the
 * hypervisor (CID 0) because RID 1 is already reserved.
 */
#define VSOCK_PACKET_HYPERVISOR_RID     15
#define VMCI_RESOURCE_MAX               16
/*
 * The core VMCI device functionality only requires the resource IDs of
 * VMCI_QUEUEPAIR_DETACH and below.
 */
#define VMCI_CORE_DEVICE_RESOURCE_MAX   VMCI_QUEUEPAIR_DETACH

/*
 * VMCI reserved host datagram resource IDs.
 * vsock control channel has resource id 1.
 */
#define VMCI_DVFILTER_DATA_PATH_DATAGRAM        2

/* VMCI Ids. */
typedef uint32_t vmci_id;

struct vmci_id_range {
        int8_t  action; /* VMCI_FA_X, for use in filters. */
        vmci_id begin;  /* Beginning of range. */
        vmci_id end;    /* End of range. */
};

struct vmci_handle {
        vmci_id context;
        vmci_id resource;
};

static inline struct vmci_handle
VMCI_MAKE_HANDLE(vmci_id cid, vmci_id rid)
{
        struct vmci_handle h;

        h.context = cid;
        h.resource = rid;
        return (h);
}

#define VMCI_HANDLE_TO_CONTEXT_ID(_handle)                              \
        ((_handle).context)
#define VMCI_HANDLE_TO_RESOURCE_ID(_handle)                             \
        ((_handle).resource)
#define VMCI_HANDLE_EQUAL(_h1, _h2)                                     \
        ((_h1).context == (_h2).context && (_h1).resource == (_h2).resource)

#define VMCI_INVALID_ID                 0xFFFFFFFF
static const struct vmci_handle VMCI_INVALID_HANDLE = {VMCI_INVALID_ID,
            VMCI_INVALID_ID};

#define VMCI_HANDLE_INVALID(_handle)                                    \
        VMCI_HANDLE_EQUAL((_handle), VMCI_INVALID_HANDLE)
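
/*
 * Example (illustrative sketch under #if 0): building a handle and
 * round-tripping its parts through the accessors above. The IDs used
 * (VMCI_HOST_CONTEXT_ID, VMCI_CONTEXT_RESOURCE_ID) are defined later
 * in this file; ASSERT is assumed to come from vmci_kernel_defs.h.
 */
#if 0
static void
vmci_example_handle_usage(void)
{
        struct vmci_handle h;

        h = VMCI_MAKE_HANDLE(VMCI_HOST_CONTEXT_ID, VMCI_CONTEXT_RESOURCE_ID);
        ASSERT(!VMCI_HANDLE_INVALID(h));
        ASSERT(VMCI_HANDLE_TO_CONTEXT_ID(h) == VMCI_HOST_CONTEXT_ID);
        ASSERT(VMCI_HANDLE_TO_RESOURCE_ID(h) == VMCI_CONTEXT_RESOURCE_ID);
}
#endif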

/*
 * The defines below can be used to send anonymous requests.
 * This also indicates that no response is expected.
 */
#define VMCI_ANON_SRC_CONTEXT_ID                                        \
        VMCI_INVALID_ID
#define VMCI_ANON_SRC_RESOURCE_ID                                       \
        VMCI_INVALID_ID
#define VMCI_ANON_SRC_HANDLE                                            \
        VMCI_MAKE_HANDLE(VMCI_ANON_SRC_CONTEXT_ID,                      \
        VMCI_ANON_SRC_RESOURCE_ID)

/* The lowest 16 context ids are reserved for internal use. */
#define VMCI_RESERVED_CID_LIMIT         16

/*
 * Hypervisor context id, used for calling into hypervisor
 * supplied services from the VM.
 */
#define VMCI_HYPERVISOR_CONTEXT_ID      0

/*
 * Well-known context id, a logical context that contains a set of
 * well-known services. This context ID is now obsolete.
 */
#define VMCI_WELL_KNOWN_CONTEXT_ID      1

/*
 * Context ID used by host endpoints.
 */
#define VMCI_HOST_CONTEXT_ID            2
#define VMCI_HOST_CONTEXT_INVALID_EVENT ((uintptr_t)~0)
#define VMCI_CONTEXT_IS_VM(_cid)                                        \
        (VMCI_INVALID_ID != (_cid) && (_cid) > VMCI_HOST_CONTEXT_ID)

/*
 * The VMCI_CONTEXT_RESOURCE_ID is used together with VMCI_MAKE_HANDLE to make
 * handles that refer to a specific context.
 */
#define VMCI_CONTEXT_RESOURCE_ID        0

/*
 *------------------------------------------------------------------------------
 *
 * VMCI error codes.
 *
 *------------------------------------------------------------------------------
 */

#define VMCI_SUCCESS_QUEUEPAIR_ATTACH           5
#define VMCI_SUCCESS_QUEUEPAIR_CREATE           4
#define VMCI_SUCCESS_LAST_DETACH                3
#define VMCI_SUCCESS_ACCESS_GRANTED             2
#define VMCI_SUCCESS_ENTRY_DEAD                 1
#define VMCI_SUCCESS                            0LL
#define VMCI_ERROR_INVALID_RESOURCE             (-1)
#define VMCI_ERROR_INVALID_ARGS                 (-2)
#define VMCI_ERROR_NO_MEM                       (-3)
#define VMCI_ERROR_DATAGRAM_FAILED              (-4)
#define VMCI_ERROR_MORE_DATA                    (-5)
#define VMCI_ERROR_NO_MORE_DATAGRAMS            (-6)
#define VMCI_ERROR_NO_ACCESS                    (-7)
#define VMCI_ERROR_NO_HANDLE                    (-8)
#define VMCI_ERROR_DUPLICATE_ENTRY              (-9)
#define VMCI_ERROR_DST_UNREACHABLE              (-10)
#define VMCI_ERROR_PAYLOAD_TOO_LARGE            (-11)
#define VMCI_ERROR_INVALID_PRIV                 (-12)
#define VMCI_ERROR_GENERIC                      (-13)
#define VMCI_ERROR_PAGE_ALREADY_SHARED          (-14)
#define VMCI_ERROR_CANNOT_SHARE_PAGE            (-15)
#define VMCI_ERROR_CANNOT_UNSHARE_PAGE          (-16)
#define VMCI_ERROR_NO_PROCESS                   (-17)
#define VMCI_ERROR_NO_DATAGRAM                  (-18)
#define VMCI_ERROR_NO_RESOURCES                 (-19)
#define VMCI_ERROR_UNAVAILABLE                  (-20)
#define VMCI_ERROR_NOT_FOUND                    (-21)
#define VMCI_ERROR_ALREADY_EXISTS               (-22)
#define VMCI_ERROR_NOT_PAGE_ALIGNED             (-23)
#define VMCI_ERROR_INVALID_SIZE                 (-24)
#define VMCI_ERROR_REGION_ALREADY_SHARED        (-25)
#define VMCI_ERROR_TIMEOUT                      (-26)
#define VMCI_ERROR_DATAGRAM_INCOMPLETE          (-27)
#define VMCI_ERROR_INCORRECT_IRQL               (-28)
#define VMCI_ERROR_EVENT_UNKNOWN                (-29)
#define VMCI_ERROR_OBSOLETE                     (-30)
#define VMCI_ERROR_QUEUEPAIR_MISMATCH           (-31)
#define VMCI_ERROR_QUEUEPAIR_NOTSET             (-32)
#define VMCI_ERROR_QUEUEPAIR_NOTOWNER           (-33)
#define VMCI_ERROR_QUEUEPAIR_NOTATTACHED        (-34)
#define VMCI_ERROR_QUEUEPAIR_NOSPACE            (-35)
#define VMCI_ERROR_QUEUEPAIR_NODATA             (-36)
#define VMCI_ERROR_BUSMEM_INVALIDATION          (-37)
#define VMCI_ERROR_MODULE_NOT_LOADED            (-38)
#define VMCI_ERROR_DEVICE_NOT_FOUND             (-39)
#define VMCI_ERROR_QUEUEPAIR_NOT_READY          (-40)
#define VMCI_ERROR_WOULD_BLOCK                  (-41)

/* VMCI clients should return error codes within this range. */
#define VMCI_ERROR_CLIENT_MIN                   (-500)
#define VMCI_ERROR_CLIENT_MAX                   (-550)

/* Internal error codes. */
#define VMCI_SHAREDMEM_ERROR_BAD_CONTEXT        (-1000)
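
/*
 * Example (illustrative under #if 0, not a mapping defined by this
 * driver): one plausible translation of VMCI status codes onto errno
 * values for callers that expect POSIX-style errors.
 */
#if 0
static int
vmci_example_status_to_errno(int vmci_status)
{

        if (vmci_status >= VMCI_SUCCESS)
                return (0);
        switch (vmci_status) {
        case VMCI_ERROR_INVALID_ARGS:
                return (EINVAL);
        case VMCI_ERROR_NO_MEM:
                return (ENOMEM);
        case VMCI_ERROR_TIMEOUT:
                return (ETIMEDOUT);
        case VMCI_ERROR_WOULD_BLOCK:
                return (EWOULDBLOCK);
        default:
                return (EIO);
        }
}
#endif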

#define VMCI_PATH_MAX                           256

/* VMCI reserved events. */
typedef uint32_t vmci_event_type;

#define VMCI_EVENT_CTX_ID_UPDATE        0       // Only applicable to guest
                                                // endpoints
#define VMCI_EVENT_CTX_REMOVED          1       // Applicable to guest and host
#define VMCI_EVENT_QP_RESUMED           2       // Only applicable to guest
                                                // endpoints
#define VMCI_EVENT_QP_PEER_ATTACH       3       // Applicable to guest, host
                                                // and VMX
#define VMCI_EVENT_QP_PEER_DETACH       4       // Applicable to guest, host
                                                // and VMX
#define VMCI_EVENT_MEM_ACCESS_ON        5       // Applicable to VMX and vmk. On
                                                // vmk, this event has the
                                                // Context payload type
#define VMCI_EVENT_MEM_ACCESS_OFF       6       // Applicable to VMX and vmk.
                                                // Same as above for the payload
                                                // type
#define VMCI_EVENT_GUEST_PAUSED         7       // Applicable to vmk. This
                                                // event has the Context
                                                // payload type
#define VMCI_EVENT_GUEST_UNPAUSED       8       // Applicable to vmk. Same as
                                                // above for the payload type.
#define VMCI_EVENT_MAX                  9

/*
 * Of the above events, a few are reserved for use in the VMX, and other
 * endpoints (guest and host kernel) should not use them. For the rest of the
 * events, we allow both host and guest endpoints to subscribe to them, to
 * maintain the same API for host and guest endpoints.
 */

#define VMCI_EVENT_VALID_VMX(_event)                                    \
        ((_event) == VMCI_EVENT_QP_PEER_ATTACH ||                       \
        (_event) == VMCI_EVENT_QP_PEER_DETACH ||                        \
        (_event) == VMCI_EVENT_MEM_ACCESS_ON ||                         \
        (_event) == VMCI_EVENT_MEM_ACCESS_OFF)

#define VMCI_EVENT_VALID(_event)                                        \
        ((_event) < VMCI_EVENT_MAX &&                                   \
        (_event) != VMCI_EVENT_MEM_ACCESS_ON &&                         \
        (_event) != VMCI_EVENT_MEM_ACCESS_OFF &&                        \
        (_event) != VMCI_EVENT_GUEST_PAUSED &&                          \
        (_event) != VMCI_EVENT_GUEST_UNPAUSED)
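
/*
 * Example (illustrative sketch under #if 0): a guest/host kernel
 * endpoint guarding an event subscription with the macro above.
 * vmci_example_do_subscribe() is hypothetical.
 */
#if 0
static int
vmci_example_subscribe(vmci_event_type event)
{

        /* Kernel endpoints must reject the VMX/vmk-only events. */
        if (!VMCI_EVENT_VALID(event))
                return (VMCI_ERROR_EVENT_UNKNOWN);
        return (vmci_example_do_subscribe(event));
}
#endif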

/* Reserved guest datagram resource ids. */
#define VMCI_EVENT_HANDLER              0

/*
 * VMCI coarse-grained privileges (per context or host process/endpoint). An
 * entity with the restricted flag is only allowed to interact with the
 * hypervisor and trusted entities.
 */
typedef uint32_t vmci_privilege_flags;

#define VMCI_PRIVILEGE_FLAG_RESTRICTED          0x01
#define VMCI_PRIVILEGE_FLAG_TRUSTED             0x02
#define VMCI_PRIVILEGE_ALL_FLAGS                                        \
        (VMCI_PRIVILEGE_FLAG_RESTRICTED | VMCI_PRIVILEGE_FLAG_TRUSTED)
#define VMCI_NO_PRIVILEGE_FLAGS                 0x00
#define VMCI_DEFAULT_PROC_PRIVILEGE_FLAGS       VMCI_NO_PRIVILEGE_FLAGS
#define VMCI_LEAST_PRIVILEGE_FLAGS              VMCI_PRIVILEGE_FLAG_RESTRICTED
#define VMCI_MAX_PRIVILEGE_FLAGS                VMCI_PRIVILEGE_FLAG_TRUSTED
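
/*
 * Example (illustrative sketch under #if 0) of the policy described
 * above: a restricted endpoint may only reach the hypervisor or a
 * trusted peer. This helper is hypothetical, not part of the driver.
 */
#if 0
static bool
vmci_example_may_interact(vmci_privilege_flags src_flags, vmci_id dst_cid,
    vmci_privilege_flags dst_flags)
{

        if ((src_flags & VMCI_PRIVILEGE_FLAG_RESTRICTED) == 0)
                return (true);
        return (dst_cid == VMCI_HYPERVISOR_CONTEXT_ID ||
            (dst_flags & VMCI_PRIVILEGE_FLAG_TRUSTED) != 0);
}
#endif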

/* 0 through VMCI_RESERVED_RESOURCE_ID_MAX are reserved. */
#define VMCI_RESERVED_RESOURCE_ID_MAX           1023

#define VMCI_DOMAIN_NAME_MAXLEN                 32

#define VMCI_LGPFX                              "vmci: "

/*
 * struct vmci_queue_header
 *
 * A Queue cannot stand by itself as designed. Each Queue's header contains a
 * pointer into itself (the producer_tail) and into its peer (consumer_head).
 * The reason for the separation is one of accessibility: each end-point can
 * modify two things: where the next location to enqueue is within its produce_q
 * (producer_tail); and where the next dequeue location is in its consume_q
 * (consumer_head).
 *
 * An end-point cannot modify the pointers of its peer (guest to guest; NOTE
 * that in the host both queue headers are mapped r/w). But each end-point
 * needs read access to both Queue header structures in order to determine how
 * much space is used (or left) in the Queue. This is because for an end-point
 * to know how full its produce_q is, it needs to use the consumer_head that
 * points into the produce_q, but -that- consumer_head is in the Queue header
 * for that end-point's consume_q.
 *
 * Thoroughly confused?  Sorry.
 *
 * producer_tail: the point to enqueue new entrants.  When you approach a line
 * in a store, for example, you walk up to the tail.
 *
 * consumer_head: the point in the queue from which the next element is
 * dequeued; in other words, the element at the head of the line is the next
 * one to leave it.
 *
 * Also, producer_tail points to an empty byte in the Queue, whereas
 * consumer_head points to a valid byte of data (unless producer_tail ==
 * consumer_head, in which case consumer_head does not point to a valid byte
 * of data).
 *
 * For a queue of buffer 'size' bytes, the tail and head pointers will be in
 * the range [0, size-1].
 *
 * If produce_q_header->producer_tail == consume_q_header->consumer_head then
 * the produce_q is empty.
 */
struct vmci_queue_header {
        /* All fields are 64bit and aligned. */
        struct vmci_handle      handle;         /* Identifier. */
        volatile uint64_t       producer_tail;  /* Offset in this queue. */
        volatile uint64_t       consumer_head;  /* Offset in peer queue. */
};
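
/*
 * Worked example of the layout above: for a produce_q of size 16 with
 * produce_q_header->producer_tail == 10 and
 * consume_q_header->consumer_head == 4 (the head that points into the
 * produce_q), bytes 4..9 hold valid data, byte 10 is the next slot to
 * enqueue into, and 16 - 6 - 1 == 9 bytes are free; one byte is always
 * sacrificed so that tail == head unambiguously means "empty".
 */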

/*
 * If one client of a QueuePair is a 32bit entity, we restrict the QueuePair
 * size to be less than 4GB, and use 32bit atomic operations on the head and
 * tail pointers. 64bit atomic read on a 32bit entity involves cmpxchg8b which
 * is an atomic read-modify-write. This will cause traces to fire when a 32bit
 * consumer tries to read the producer's tail pointer, for example, because the
 * consumer has read-only access to the producer's tail pointer.
 *
 * We provide the following macros to invoke 32bit or 64bit atomic operations
 * based on the architecture the code is being compiled on.
 */

#ifdef __x86_64__
#define QP_MAX_QUEUE_SIZE_ARCH          CONST64U(0xffffffffffffffff)
#define qp_atomic_read_offset(x)        atomic_load_64(x)
#define qp_atomic_write_offset(x, y)    atomic_store_64(x, y)
#else /* __x86_64__ */
        /*
         * Wrappers below are being used because atomic_store_<type> operates
         * on a specific <type>. Likewise for atomic_load_<type>.
         */

        static inline uint32_t
        type_safe_atomic_read_32(void *var)
        {
                return (atomic_load_32((volatile uint32_t *)(var)));
        }

        static inline void
        type_safe_atomic_write_32(void *var, uint32_t val)
        {
                atomic_store_32((volatile uint32_t *)(var), (uint32_t)(val));
        }

#define QP_MAX_QUEUE_SIZE_ARCH          CONST64U(0xffffffff)
#define qp_atomic_read_offset(x)        type_safe_atomic_read_32((void *)(x))
#define qp_atomic_write_offset(x, y)                                    \
        type_safe_atomic_write_32((void *)(x), (uint32_t)(y))
#endif /* __x86_64__ */

/*
 *------------------------------------------------------------------------------
 *
 * qp_add_pointer --
 *
 *     Helper to add a given offset to a head or tail pointer. Wraps the value
 *     of the pointer around the max size of the queue.
 *
 * Results:
 *     None.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

static inline void
qp_add_pointer(volatile uint64_t *var, size_t add, uint64_t size)
{
        uint64_t new_val = qp_atomic_read_offset(var);

        if (new_val >= size - add)
                new_val -= size;

        new_val += add;
        qp_atomic_write_offset(var, new_val);
}
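
/*
 * Worked example of the wrap above: with size == 1024, advancing a
 * pointer at offset 1020 by add == 8 first subtracts size (because
 * 1020 >= 1024 - 8) and then adds 8, leaving the offset at 4, i.e.
 * (1020 + 8) % 1024.
 */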

/*
 *------------------------------------------------------------------------------
 *
 * vmci_queue_header_producer_tail --
 *
 *     Helper routine to get the Producer Tail from the supplied queue.
 *
 * Results:
 *     The contents of the queue's producer tail.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

static inline uint64_t
vmci_queue_header_producer_tail(const struct vmci_queue_header *q_header)
{
        struct vmci_queue_header *qh = (struct vmci_queue_header *)q_header;
        return (qp_atomic_read_offset(&qh->producer_tail));
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_queue_header_consumer_head --
 *
 *     Helper routine to get the Consumer Head from the supplied queue.
 *
 * Results:
 *     The contents of the queue's consumer head.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

static inline uint64_t
vmci_queue_header_consumer_head(const struct vmci_queue_header *q_header)
{
        struct vmci_queue_header *qh = (struct vmci_queue_header *)q_header;
        return (qp_atomic_read_offset(&qh->consumer_head));
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_queue_header_add_producer_tail --
 *
 *     Helper routine to increment the Producer Tail. Fundamentally,
 *     qp_add_pointer() is used to manipulate the tail itself.
 *
 * Results:
 *     None.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

static inline void
vmci_queue_header_add_producer_tail(struct vmci_queue_header *q_header,
    size_t add, uint64_t queue_size)
{

        qp_add_pointer(&q_header->producer_tail, add, queue_size);
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_queue_header_add_consumer_head --
 *
 *     Helper routine to increment the Consumer Head. Fundamentally,
 *     qp_add_pointer() is used to manipulate the head itself.
 *
 * Results:
 *     None.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

static inline void
vmci_queue_header_add_consumer_head(struct vmci_queue_header *q_header,
    size_t add, uint64_t queue_size)
{

        qp_add_pointer(&q_header->consumer_head, add, queue_size);
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_queue_header_get_pointers --
 *
 *     Helper routine for getting the head and the tail pointer for a queue.
 *     Both queue headers are needed to get both pointers for one queue.
 *
 * Results:
 *     None.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

static inline void
vmci_queue_header_get_pointers(const struct vmci_queue_header *produce_q_header,
    const struct vmci_queue_header *consume_q_header, uint64_t *producer_tail,
    uint64_t *consumer_head)
{

        if (producer_tail)
                *producer_tail =
                    vmci_queue_header_producer_tail(produce_q_header);

        if (consumer_head)
                *consumer_head =
                    vmci_queue_header_consumer_head(consume_q_header);
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_queue_header_reset_pointers --
 *
 *     Reset the tail pointer (of "this" queue) and the head pointer (of "peer"
 *     queue).
 *
 * Results:
 *     None.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

static inline void
vmci_queue_header_reset_pointers(struct vmci_queue_header *q_header)
{

        qp_atomic_write_offset(&q_header->producer_tail, CONST64U(0));
        qp_atomic_write_offset(&q_header->consumer_head, CONST64U(0));
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_queue_header_init --
 *
 *     Initializes a queue's state (head & tail pointers).
 *
 * Results:
 *     None.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

static inline void
vmci_queue_header_init(struct vmci_queue_header *q_header,
    const struct vmci_handle handle)
{

        q_header->handle = handle;
        vmci_queue_header_reset_pointers(q_header);
}

/*
 *------------------------------------------------------------------------------
 *
 * vmci_queue_header_free_space --
 *
 *     Finds available free space in a produce queue to enqueue more data or
 *     reports an error if queue pair corruption is detected.
 *
 * Results:
 *     Free space size in bytes or an error code.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

static inline int64_t
vmci_queue_header_free_space(const struct vmci_queue_header *produce_q_header,
    const struct vmci_queue_header *consume_q_header,
    const uint64_t produce_q_size)
{
        uint64_t free_space;
        uint64_t head;
        uint64_t tail;

        tail = vmci_queue_header_producer_tail(produce_q_header);
        head = vmci_queue_header_consumer_head(consume_q_header);

        if (tail >= produce_q_size || head >= produce_q_size)
                return (VMCI_ERROR_INVALID_SIZE);

        /*
         * Deduct 1 to avoid tail becoming equal to head which causes ambiguity.
         * If head and tail are equal it means that the queue is empty.
         */

        if (tail >= head)
                free_space = produce_q_size - (tail - head) - 1;
        else
                free_space = head - tail - 1;

        return (free_space);
}
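
/*
 * Example (illustrative sketch under #if 0): an enqueue path built on
 * the helpers above. Per the layout notes, the local endpoint advances
 * producer_tail in its own produce_q header. The data copy is elided;
 * vmci_example_copy_to_queue() is hypothetical.
 */
#if 0
static int64_t
vmci_example_enqueue(struct vmci_queue_header *produce_q_header,
    const struct vmci_queue_header *consume_q_header,
    uint64_t produce_q_size, const void *buf, size_t buf_size)
{
        int64_t free_space;

        free_space = vmci_queue_header_free_space(produce_q_header,
            consume_q_header, produce_q_size);
        if (free_space < 0)
                return (free_space);
        if (free_space < (int64_t)buf_size)
                return (VMCI_ERROR_QUEUEPAIR_NOSPACE);

        vmci_example_copy_to_queue(produce_q_header, buf, buf_size);
        vmci_queue_header_add_producer_tail(produce_q_header, buf_size,
            produce_q_size);
        return ((int64_t)buf_size);
}
#endif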

/*
 *------------------------------------------------------------------------------
 *
 * vmci_queue_header_buf_ready --
 *
 *     vmci_queue_header_free_space() does all the heavy lifting of determining
 *     the number of free bytes in a Queue. This routine then subtracts that
 *     size from the full size of the Queue so the caller knows how many bytes
 *     are ready to be dequeued.
 *
 * Results:
 *     On success, available data size in bytes (up to INT64_MAX).
 *     On failure, appropriate error code.
 *
 * Side effects:
 *     None.
 *
 *------------------------------------------------------------------------------
 */

static inline int64_t
vmci_queue_header_buf_ready(const struct vmci_queue_header *consume_q_header,
    const struct vmci_queue_header *produce_q_header,
    const uint64_t consume_q_size)
{
        int64_t free_space;

        free_space = vmci_queue_header_free_space(consume_q_header,
            produce_q_header, consume_q_size);
        if (free_space < VMCI_SUCCESS)
                return (free_space);
        else
                return (consume_q_size - free_space - 1);
}
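
/*
 * Example (illustrative sketch under #if 0): the matching dequeue path.
 * Per the layout notes, the consumer head for the consume_q lives in
 * the local produce_q header, which is why that header is the one
 * updated here. vmci_example_copy_from_queue() is hypothetical.
 */
#if 0
static int64_t
vmci_example_dequeue(struct vmci_queue_header *produce_q_header,
    const struct vmci_queue_header *consume_q_header,
    uint64_t consume_q_size, void *buf, size_t buf_size)
{
        int64_t ready;

        ready = vmci_queue_header_buf_ready(consume_q_header,
            produce_q_header, consume_q_size);
        if (ready <= 0)
                return (ready);
        if ((uint64_t)ready < (uint64_t)buf_size)
                buf_size = (size_t)ready;

        vmci_example_copy_from_queue(buf, consume_q_header, buf_size);
        vmci_queue_header_add_consumer_head(produce_q_header, buf_size,
            consume_q_size);
        return ((int64_t)buf_size);
}
#endif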

#endif /* !_VMCI_DEFS_H_ */
