FreeBSD/Linux Kernel Cross Reference
sys/contrib/vchiq/interface/vchiq_arm/vchiq_arm.c

    1 /**
    2  * Copyright (c) 2014 Raspberry Pi (Trading) Ltd. All rights reserved.
    3  * Copyright (c) 2010-2012 Broadcom. All rights reserved.
    4  *
    5  * Redistribution and use in source and binary forms, with or without
    6  * modification, are permitted provided that the following conditions
    7  * are met:
    8  * 1. Redistributions of source code must retain the above copyright
    9  *    notice, this list of conditions, and the following disclaimer,
   10  *    without modification.
   11  * 2. Redistributions in binary form must reproduce the above copyright
   12  *    notice, this list of conditions and the following disclaimer in the
   13  *    documentation and/or other materials provided with the distribution.
   14  * 3. The names of the above-listed copyright holders may not be used
   15  *    to endorse or promote products derived from this software without
   16  *    specific prior written permission.
   17  *
   18  * ALTERNATIVELY, this software may be distributed under the terms of the
   19  * GNU General Public License ("GPL") version 2, as published by the Free
   20  * Software Foundation.
   21  *
   22  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
   23  * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
   24  * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
   25  * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
   26  * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
   27  * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
   28  * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
   29  * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
   30  * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
   31  * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
   32  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
   33  */
   34 
   35 
   36 #include "vchiq_core.h"
   37 #include "vchiq_ioctl.h"
   38 #include "vchiq_arm.h"
   39 
   40 #define DEVICE_NAME "vchiq"
   41 
   42 /* Override the default prefix, which would be vchiq_arm (from the filename) */
   43 #undef MODULE_PARAM_PREFIX
   44 #define MODULE_PARAM_PREFIX DEVICE_NAME "."
   45 
   46 #define VCHIQ_MINOR 0
   47 
   48 /* Some per-instance constants */
   49 #define MAX_COMPLETIONS 128
   50 #define MAX_SERVICES 64
   51 #define MAX_ELEMENTS 8
   52 #define MSG_QUEUE_SIZE 128
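/*
 * MAX_COMPLETIONS and MSG_QUEUE_SIZE must be powers of two: the completion
 * queue and each per-service message queue use free-running insert/remove
 * counters, compare their difference against the queue size to detect a
 * full queue, and locate slots by masking, e.g.
 * completions[insert & (MAX_COMPLETIONS - 1)] in add_completion() below.
 */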
   53 
   54 #define KEEPALIVE_VER 1
   55 #define KEEPALIVE_VER_MIN KEEPALIVE_VER
   56 
   57 /* Run time control of log level, based on KERN_XXX level. */
   58 int vchiq_arm_log_level = VCHIQ_LOG_DEFAULT;
   59 int vchiq_susp_log_level = VCHIQ_LOG_ERROR;
   60 
   61 #define SUSPEND_TIMER_TIMEOUT_MS 100
   62 #define SUSPEND_RETRY_TIMER_TIMEOUT_MS 1000
   63 
   64 #define VC_SUSPEND_NUM_OFFSET 3 /* number of values before idle which are -ve */
   65 static const char *const suspend_state_names[] = {
   66         "VC_SUSPEND_FORCE_CANCELED",
   67         "VC_SUSPEND_REJECTED",
   68         "VC_SUSPEND_FAILED",
   69         "VC_SUSPEND_IDLE",
   70         "VC_SUSPEND_REQUESTED",
   71         "VC_SUSPEND_IN_PROGRESS",
   72         "VC_SUSPEND_SUSPENDED"
   73 };
   74 #define VC_RESUME_NUM_OFFSET 1 /* number of values before idle which are -ve */
   75 static const char *const resume_state_names[] = {
   76         "VC_RESUME_FAILED",
   77         "VC_RESUME_IDLE",
   78         "VC_RESUME_REQUESTED",
   79         "VC_RESUME_IN_PROGRESS",
   80         "VC_RESUME_RESUMED"
   81 };
   82 /* The number of times we allow force suspend to timeout before actually
   83 ** _forcing_ suspend.  This is to cater for SW which fails to release vchiq
   84 ** correctly - we don't want to prevent ARM suspend indefinitely in this case.
   85 */
   86 #define FORCE_SUSPEND_FAIL_MAX 8
   87 
   88 /* The time in ms allowed for videocore to go idle when force suspend has been
   89  * requested */
   90 #define FORCE_SUSPEND_TIMEOUT_MS 200
   91 
   92 
   93 static void suspend_timer_callback(unsigned long context);
   94 #ifdef notyet
   95 static int vchiq_proc_add_instance(VCHIQ_INSTANCE_T instance);
   96 static void vchiq_proc_remove_instance(VCHIQ_INSTANCE_T instance);
   97 #endif
   98 
   99 
  100 typedef struct user_service_struct {
  101         VCHIQ_SERVICE_T *service;
  102         void *userdata;
  103         VCHIQ_INSTANCE_T instance;
  104         char is_vchi;
  105         char dequeue_pending;
  106         char close_pending;
  107         int message_available_pos;
  108         int msg_insert;
  109         int msg_remove;
  110         struct semaphore insert_event;
  111         struct semaphore remove_event;
  112         struct semaphore close_event;
  113         VCHIQ_HEADER_T * msg_queue[MSG_QUEUE_SIZE];
  114 } USER_SERVICE_T;
  115 
  116 struct bulk_waiter_node {
  117         struct bulk_waiter bulk_waiter;
  118         int pid;
  119         struct list_head list;
  120 };
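/*
 * A bulk_waiter_node outlives its QUEUE_BULK ioctl when a blocking transfer
 * is interrupted before completion: the node is parked on the instance's
 * bulk_waiter_list keyed by pid, and a later VCHIQ_BULK_MODE_WAITING request
 * from the same process retrieves it (see the bulk-transfer ioctl handling).
 */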
  121 
  122 struct vchiq_instance_struct {
  123         VCHIQ_STATE_T *state;
  124         VCHIQ_COMPLETION_DATA_T completions[MAX_COMPLETIONS];
  125         int completion_insert;
  126         int completion_remove;
  127         struct semaphore insert_event;
  128         struct semaphore remove_event;
  129         struct mutex completion_mutex;
  130 
  131         int connected;
  132         int closing;
  133         int pid;
  134         int mark;
  135         int use_close_delivered;
  136         int trace;
  137 
  138         struct list_head bulk_waiter_list;
  139         struct mutex bulk_waiter_list_mutex;
  140 
  141 #ifdef notyet
  142         VCHIQ_DEBUGFS_NODE_T proc_entry;
  143 #endif
  144 };
  145 
  146 typedef struct dump_context_struct {
  147         char __user *buf;
  148         size_t actual;
  149         size_t space;
  150         loff_t offset;
  151 } DUMP_CONTEXT_T;
  152 
  153 static struct cdev *  vchiq_cdev;
  154 VCHIQ_STATE_T g_state;
  155 static DEFINE_SPINLOCK(msg_queue_spinlock);
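/*
 * msg_queue_spinlock serialises access to every USER_SERVICE_T msg_queue and
 * its msg_insert/msg_remove counters, shared between service_callback(),
 * VCHIQ_IOC_DEQUEUE_MESSAGE and instance teardown in vchiq_close().
 */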
  156 
  157 static const char *const ioctl_names[] = {
  158         "CONNECT",
  159         "SHUTDOWN",
  160         "CREATE_SERVICE",
  161         "REMOVE_SERVICE",
  162         "QUEUE_MESSAGE",
  163         "QUEUE_BULK_TRANSMIT",
  164         "QUEUE_BULK_RECEIVE",
  165         "AWAIT_COMPLETION",
  166         "DEQUEUE_MESSAGE",
  167         "GET_CLIENT_ID",
  168         "GET_CONFIG",
  169         "CLOSE_SERVICE",
  170         "USE_SERVICE",
  171         "RELEASE_SERVICE",
  172         "SET_SERVICE_OPTION",
  173         "DUMP_PHYS_MEM",
  174         "LIB_VERSION",
  175         "CLOSE_DELIVERED"
  176 };
  177 
  178 vchiq_static_assert((sizeof(ioctl_names)/sizeof(ioctl_names[0])) ==
  179         (VCHIQ_IOC_MAX + 1));
  180 
  181 static d_open_t         vchiq_open;
  182 static d_close_t        vchiq_close;
  183 static d_ioctl_t        vchiq_ioctl;
  184 
  185 static struct cdevsw vchiq_cdevsw = {
  186         .d_version      = D_VERSION,
  187         .d_ioctl        = vchiq_ioctl,
  188         .d_open         = vchiq_open,
  189         .d_close        = vchiq_close,
  190         .d_name         = DEVICE_NAME,
  191 };
  192 
  193 #if 0
  194 static void
  195 dump_phys_mem(void *virt_addr, uint32_t num_bytes);
  196 #endif
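/*
 * dump_phys_mem() has not been ported; the VCHIQ_IOC_DUMP_PHYS_MEM ioctl
 * below only prints an "IMPLEMENT ME" placeholder.
 */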
  197 
  198 /****************************************************************************
  199 *
  200 *   add_completion
  201 *
  202 ***************************************************************************/
  203 
  204 static VCHIQ_STATUS_T
  205 add_completion(VCHIQ_INSTANCE_T instance, VCHIQ_REASON_T reason,
  206         VCHIQ_HEADER_T *header, USER_SERVICE_T *user_service,
  207         void *bulk_userdata)
  208 {
  209         VCHIQ_COMPLETION_DATA_T *completion;
  210         int insert;
  211         DEBUG_INITIALISE(g_state.local)
  212 
  213         insert = instance->completion_insert;
  214         while ((insert - instance->completion_remove) >= MAX_COMPLETIONS) {
  215                 /* Out of space - wait for the client */
  216                 DEBUG_TRACE(SERVICE_CALLBACK_LINE);
  217                 vchiq_log_trace(vchiq_arm_log_level,
  218                         "add_completion - completion queue full");
  219                 DEBUG_COUNT(COMPLETION_QUEUE_FULL_COUNT);
  220 
  221                 if (down_interruptible(&instance->remove_event) != 0) {
  222                         vchiq_log_info(vchiq_arm_log_level,
  223                                 "service_callback interrupted");
  224                         return VCHIQ_RETRY;
  225                 }
  226 
  227                 if (instance->closing) {
  228                         vchiq_log_info(vchiq_arm_log_level,
  229                                 "service_callback closing");
  230                         return VCHIQ_SUCCESS;
  231                 }
  232                 DEBUG_TRACE(SERVICE_CALLBACK_LINE);
  233         }
  234 
  235         completion = &instance->completions[insert & (MAX_COMPLETIONS - 1)];
  236 
  237         completion->header = header;
  238         completion->reason = reason;
  239         /* N.B. service_userdata is updated while processing AWAIT_COMPLETION */
  240         completion->service_userdata = user_service->service;
  241         completion->bulk_userdata = bulk_userdata;
  242 
  243         if (reason == VCHIQ_SERVICE_CLOSED) {
  244                 /* Take an extra reference, to be held until
  245                    this CLOSED notification is delivered. */
  246                 lock_service(user_service->service);
  247                 if (instance->use_close_delivered)
  248                         user_service->close_pending = 1;
  249         }
  250 
  251         /* A write barrier is needed here to ensure that the entire completion
  252                 record is written out before the insert point. */
  253         wmb();
  254 
  255         if (reason == VCHIQ_MESSAGE_AVAILABLE)
  256                 user_service->message_available_pos = insert;
  257 
  258         instance->completion_insert = ++insert;
  259 
  260         up(&instance->insert_event);
  261 
  262         return VCHIQ_SUCCESS;
  263 }
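/*
 * Worked example of the counter arithmetic used above: with
 * completion_insert == 130 and completion_remove == 2 the queue holds
 * 130 - 2 == 128 == MAX_COMPLETIONS records, so add_completion() blocks on
 * remove_event; once the client consumes one record the next completion is
 * written to completions[130 & (MAX_COMPLETIONS - 1)] == completions[2],
 * the slot just freed.  The counters are never wrapped, only masked.
 */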
  264 
  265 /****************************************************************************
  266 *
  267 *   service_callback
  268 *
  269 ***************************************************************************/
  270 
  271 static VCHIQ_STATUS_T
  272 service_callback(VCHIQ_REASON_T reason, VCHIQ_HEADER_T *header,
  273         VCHIQ_SERVICE_HANDLE_T handle, void *bulk_userdata)
  274 {
  275         /* How do we ensure the callback goes to the right client?
  276         ** The service_user data points to a USER_SERVICE_T record containing
  277         ** the original callback and the user state structure, which contains a
  278         ** circular buffer for completion records.
  279         */
  280         USER_SERVICE_T *user_service;
  281         VCHIQ_SERVICE_T *service;
  282         VCHIQ_INSTANCE_T instance;
  283         int skip_completion = 0;
  284         DEBUG_INITIALISE(g_state.local)
  285 
  286         DEBUG_TRACE(SERVICE_CALLBACK_LINE);
  287 
  288         service = handle_to_service(handle);
  289         BUG_ON(!service);
  290         user_service = (USER_SERVICE_T *)service->base.userdata;
  291         instance = user_service->instance;
  292 
  293         if (!instance || instance->closing)
  294                 return VCHIQ_SUCCESS;
  295 
  296         vchiq_log_trace(vchiq_arm_log_level,
  297                 "service_callback - service %lx(%d,%p), reason %d, header %lx, "
  298                 "instance %lx, bulk_userdata %lx",
  299                 (unsigned long)user_service,
  300                 service->localport, user_service->userdata,
  301                 reason, (unsigned long)header,
  302                 (unsigned long)instance, (unsigned long)bulk_userdata);
  303 
  304         if (header && user_service->is_vchi) {
  305                 spin_lock(&msg_queue_spinlock);
  306                 while (user_service->msg_insert ==
  307                         (user_service->msg_remove + MSG_QUEUE_SIZE)) {
  308                         spin_unlock(&msg_queue_spinlock);
  309                         DEBUG_TRACE(SERVICE_CALLBACK_LINE);
  310                         DEBUG_COUNT(MSG_QUEUE_FULL_COUNT);
  311                         vchiq_log_trace(vchiq_arm_log_level,
  312                                 "service_callback - msg queue full");
  313                         /* If there is no MESSAGE_AVAILABLE in the completion
  314                         ** queue, add one
  315                         */
  316                         if ((user_service->message_available_pos -
  317                                 instance->completion_remove) < 0) {
  318                                 VCHIQ_STATUS_T status;
  319                                 vchiq_log_info(vchiq_arm_log_level,
  320                                         "Inserting extra MESSAGE_AVAILABLE");
  321                                 DEBUG_TRACE(SERVICE_CALLBACK_LINE);
  322                                 status = add_completion(instance, reason,
  323                                         NULL, user_service, bulk_userdata);
  324                                 if (status != VCHIQ_SUCCESS) {
  325                                         DEBUG_TRACE(SERVICE_CALLBACK_LINE);
  326                                         return status;
  327                                 }
  328                         }
  329 
  330                         DEBUG_TRACE(SERVICE_CALLBACK_LINE);
  331                         if (down_interruptible(&user_service->remove_event)
  332                                 != 0) {
  333                                 vchiq_log_info(vchiq_arm_log_level,
  334                                         "service_callback interrupted");
  335                                 DEBUG_TRACE(SERVICE_CALLBACK_LINE);
  336                                 return VCHIQ_RETRY;
  337                         } else if (instance->closing) {
  338                                 vchiq_log_info(vchiq_arm_log_level,
  339                                         "service_callback closing");
  340                                 DEBUG_TRACE(SERVICE_CALLBACK_LINE);
  341                                 return VCHIQ_ERROR;
  342                         }
  343                         DEBUG_TRACE(SERVICE_CALLBACK_LINE);
  344                         spin_lock(&msg_queue_spinlock);
  345                 }
  346 
  347                 user_service->msg_queue[user_service->msg_insert &
  348                         (MSG_QUEUE_SIZE - 1)] = header;
  349                 user_service->msg_insert++;
  350 
  351                 /* If there is a thread waiting in DEQUEUE_MESSAGE, or if
  352                 ** there is a MESSAGE_AVAILABLE in the completion queue then
  353                 ** bypass the completion queue.
  354                 */
  355                 if (((user_service->message_available_pos -
  356                         instance->completion_remove) >= 0) ||
  357                         user_service->dequeue_pending) {
  358                         user_service->dequeue_pending = 0;
  359                         skip_completion = 1;
  360                 }
  361 
  362                 spin_unlock(&msg_queue_spinlock);
  363 
  364                 up(&user_service->insert_event);
  365 
  366                 header = NULL;
  367         }
  368 
  369         if (skip_completion) {
  370                 DEBUG_TRACE(SERVICE_CALLBACK_LINE);
  371                 return VCHIQ_SUCCESS;
  372         }
  373 
  374         DEBUG_TRACE(SERVICE_CALLBACK_LINE);
  375 
  376         return add_completion(instance, reason, header, user_service,
  377                 bulk_userdata);
  378 }
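/*
 * service_callback() therefore has two delivery paths: for a vchi service
 * the header is parked in user_service->msg_queue and the header pointer is
 * NULLed, so any completion record merely signals MESSAGE_AVAILABLE, and the
 * completion is skipped entirely (skip_completion) when a DEQUEUE_MESSAGE is
 * already pending or an undelivered MESSAGE_AVAILABLE is still queued.  For
 * non-vchi services every event goes straight through add_completion().
 */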
  379 
  380 /****************************************************************************
  381 *
  382 *   user_service_free
  383 *
  384 ***************************************************************************/
  385 static void
  386 user_service_free(void *userdata)
  387 {
  388         USER_SERVICE_T *user_service = userdata;
  389         
  390         _sema_destroy(&user_service->insert_event);
  391         _sema_destroy(&user_service->remove_event);
  392 
  393         kfree(user_service);
  394 }
  395 
  396 /****************************************************************************
  397 *
  398 *   close_delivered
  399 *
  400 ***************************************************************************/
  401 static void close_delivered(USER_SERVICE_T *user_service)
  402 {
  403         vchiq_log_info(vchiq_arm_log_level,
  404                 "close_delivered(handle=%x)",
  405                 user_service->service->handle);
  406 
  407         if (user_service->close_pending) {
  408                 /* Allow the underlying service to be culled */
  409                 unlock_service(user_service->service);
  410 
  411                 /* Wake the user-thread blocked in close_ or remove_service */
  412                 up(&user_service->close_event);
  413  
  414                 user_service->close_pending = 0;
  415         }
  416 }
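/*
 * add_completion() takes an extra service reference whenever a
 * VCHIQ_SERVICE_CLOSED completion is queued.  With close-delivered semantics
 * enabled, close_pending is also set and the reference is released here once
 * the client library acknowledges the CLOSED record via the CLOSE_DELIVERED
 * ioctl; otherwise AWAIT_COMPLETION drops it as soon as the record has been
 * copied out to user space.
 */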
  417 
  418 /****************************************************************************
  419 *
  420 *   vchiq_ioctl
  421 *
  422 ***************************************************************************/
  423 
  424 static int
  425 vchiq_ioctl(struct cdev *cdev, u_long cmd, caddr_t arg, int fflag,
  426    struct thread *td)
  427 {
  428         VCHIQ_INSTANCE_T instance;
  429         VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
  430         VCHIQ_SERVICE_T *service = NULL;
  431         int ret = 0;
  432         int i, rc;
  433         DEBUG_INITIALISE(g_state.local)
  434 
  435         if ((ret = devfs_get_cdevpriv((void**)&instance))) {
  436                 printf("vchiq_ioctl: devfs_get_cdevpriv failed: error %d\n", ret);
  437                 return (ret);
  438         }
  439 
  440 /* XXXBSD: HACK! */
  441 #define _IOC_NR(x) ((x) & 0xff)
  442 #define _IOC_TYPE(x)    IOCGROUP(x)
  443 
  444         vchiq_log_trace(vchiq_arm_log_level,
  445                  "vchiq_ioctl - instance %x, cmd %s, arg %p",
  446                 (unsigned int)instance,
  447                 ((_IOC_TYPE(cmd) == VCHIQ_IOC_MAGIC) &&
  448                 (_IOC_NR(cmd) <= VCHIQ_IOC_MAX)) ?
  449                 ioctl_names[_IOC_NR(cmd)] : "<invalid>", arg);
  450 
  451         switch (cmd) {
  452         case VCHIQ_IOC_SHUTDOWN:
  453                 if (!instance->connected)
  454                         break;
  455 
  456                 /* Remove all services */
  457                 i = 0;
  458                 while ((service = next_service_by_instance(instance->state,
  459                         instance, &i)) != NULL) {
  460                         status = vchiq_remove_service(service->handle);
  461                         unlock_service(service);
  462                         if (status != VCHIQ_SUCCESS)
  463                                 break;
  464                 }
  465                 service = NULL;
  466 
  467                 if (status == VCHIQ_SUCCESS) {
  468                         /* Wake the completion thread and ask it to exit */
  469                         instance->closing = 1;
  470                         up(&instance->insert_event);
  471                 }
  472 
  473                 break;
  474 
  475         case VCHIQ_IOC_CONNECT:
  476                 if (instance->connected) {
  477                         ret = -EINVAL;
  478                         break;
  479                 }
  480                 rc = lmutex_lock_interruptible(&instance->state->mutex);
  481                 if (rc != 0) {
  482                         vchiq_log_error(vchiq_arm_log_level,
  483                                 "vchiq: connect: could not lock mutex for "
  484                                 "state %d: %d",
  485                                 instance->state->id, rc);
  486                         ret = -EINTR;
  487                         break;
  488                 }
  489                 status = vchiq_connect_internal(instance->state, instance);
  490                 lmutex_unlock(&instance->state->mutex);
  491 
  492                 if (status == VCHIQ_SUCCESS)
  493                         instance->connected = 1;
  494                 else
  495                         vchiq_log_error(vchiq_arm_log_level,
  496                                 "vchiq: could not connect: %d", status);
  497                 break;
  498 
  499         case VCHIQ_IOC_CREATE_SERVICE: {
  500                 VCHIQ_CREATE_SERVICE_T args;
  501                 USER_SERVICE_T *user_service = NULL;
  502                 void *userdata;
  503                 int srvstate;
  504 
  505                 memcpy(&args, (const void*)arg, sizeof(args));
  506 
  507                 user_service = kmalloc(sizeof(USER_SERVICE_T), GFP_KERNEL);
  508                 if (!user_service) {
  509                         ret = -ENOMEM;
  510                         break;
  511                 }
  512 
  513                 if (args.is_open) {
  514                         if (!instance->connected) {
  515                                 ret = -ENOTCONN;
  516                                 kfree(user_service);
  517                                 break;
  518                         }
  519                         srvstate = VCHIQ_SRVSTATE_OPENING;
  520                 } else {
  521                         srvstate =
  522                                  instance->connected ?
  523                                  VCHIQ_SRVSTATE_LISTENING :
  524                                  VCHIQ_SRVSTATE_HIDDEN;
  525                 }
  526 
  527                 userdata = args.params.userdata;
  528                 args.params.callback = service_callback;
  529                 args.params.userdata = user_service;
  530                 service = vchiq_add_service_internal(
  531                                 instance->state,
  532                                 &args.params, srvstate,
  533                                 instance, user_service_free);
  534 
  535                 if (service != NULL) {
  536                         user_service->service = service;
  537                         user_service->userdata = userdata;
  538                         user_service->instance = instance;
  539                         user_service->is_vchi = (args.is_vchi != 0);
  540                         user_service->dequeue_pending = 0;
  541                         user_service->close_pending = 0;
  542                         user_service->message_available_pos =
  543                                 instance->completion_remove - 1;
  544                         user_service->msg_insert = 0;
  545                         user_service->msg_remove = 0;
  546                         _sema_init(&user_service->insert_event, 0);
  547                         _sema_init(&user_service->remove_event, 0);
  548                         _sema_init(&user_service->close_event, 0);
  549 
  550                         if (args.is_open) {
  551                                 status = vchiq_open_service_internal
  552                                         (service, instance->pid);
  553                                 if (status != VCHIQ_SUCCESS) {
  554                                         vchiq_remove_service(service->handle);
  555                                         service = NULL;
  556                                         ret = (status == VCHIQ_RETRY) ?
  557                                                 -EINTR : -EIO;
  558                                         break;
  559                                 }
  560                         }
  561 
  562 #ifdef VCHIQ_IOCTL_DEBUG
  563                         printf("%s: [CREATE SERVICE] handle = %08x\n", __func__, service->handle);
  564 #endif
  565                         memcpy((void *)
  566                                 &(((VCHIQ_CREATE_SERVICE_T*)
  567                                         arg)->handle),
  568                                 (const void *)&service->handle,
  569                                 sizeof(service->handle));
  570 
  571                         service = NULL;
  572                 } else {
  573                         ret = -EEXIST;
  574                         kfree(user_service);
  575                 }
  576         } break;
  577 
  578         case VCHIQ_IOC_CLOSE_SERVICE: {
  579                 VCHIQ_SERVICE_HANDLE_T handle;
  580 
  581                 memcpy(&handle, (const void*)arg, sizeof(handle));
  582 
  583 #ifdef VCHIQ_IOCTL_DEBUG
  584                 printf("%s: [CLOSE SERVICE] handle = %08x\n", __func__, handle);
  585 #endif
  586 
  587                 service = find_service_for_instance(instance, handle);
  588                 if (service != NULL) {
  589                         USER_SERVICE_T *user_service =
  590                                 (USER_SERVICE_T *)service->base.userdata;
  591                         /* close_pending is false on first entry, and when the
  592                            wait in vchiq_close_service has been interrupted. */
  593                         if (!user_service->close_pending) {
  594                                 status = vchiq_close_service(service->handle);
  595                                 if (status != VCHIQ_SUCCESS)
  596                                         break;
  597                         }
  598 
  599                         /* close_pending is true once the underlying service
  600                            has been closed until the client library calls the
  601                            CLOSE_DELIVERED ioctl, signalling close_event. */
  602                         if (user_service->close_pending &&
  603                                 down_interruptible(&user_service->close_event))
  604                                 status = VCHIQ_RETRY;
  605                 }
  606                 else
  607                         ret = -EINVAL;
  608         } break;
  609 
  610         case VCHIQ_IOC_REMOVE_SERVICE: {
  611                 VCHIQ_SERVICE_HANDLE_T handle;
  612 
  613                 memcpy(&handle, (const void*)arg, sizeof(handle));
  614 
  615 #ifdef VCHIQ_IOCTL_DEBUG
  616                 printf("%s: [REMOVE SERVICE] handle = %08x\n", __func__, handle);
  617 #endif
  618 
  619                 service = find_service_for_instance(instance, handle);
  620                 if (service != NULL) {
  621                         USER_SERVICE_T *user_service =
  622                                 (USER_SERVICE_T *)service->base.userdata;
  623                         /* close_pending is false on first entry, and when the
  624                            wait in vchiq_close_service has been interrupted. */
  625                         if (!user_service->close_pending) {
  626                                 status = vchiq_remove_service(service->handle);
  627                                 if (status != VCHIQ_SUCCESS)
  628                                         break;
  629                         }
  630 
  631                         /* close_pending is true once the underlying service
  632                            has been closed until the client library calls the
  633                            CLOSE_DELIVERED ioctl, signalling close_event. */
  634                         if (user_service->close_pending &&
  635                                 down_interruptible(&user_service->close_event))
  636                                 status = VCHIQ_RETRY;
  637                 }
  638                 else
  639                         ret = -EINVAL;
  640         } break;
  641 
  642         case VCHIQ_IOC_USE_SERVICE:
  643         case VCHIQ_IOC_RELEASE_SERVICE: {
  644                 VCHIQ_SERVICE_HANDLE_T handle;
  645 
  646                 memcpy(&handle, (const void*)arg, sizeof(handle));
  647 
  648 #ifdef VCHIQ_IOCTL_DEBUG
  649                 printf("%s: [%s SERVICE] handle = %08x\n", __func__,
  650                     cmd == VCHIQ_IOC_USE_SERVICE ? "USE" : "RELEASE", handle);
  651 #endif
  652 
  653                 service = find_service_for_instance(instance, handle);
  654                 if (service != NULL) {
  655                         status = (cmd == VCHIQ_IOC_USE_SERVICE) ?
  656                                 vchiq_use_service_internal(service) :
  657                                 vchiq_release_service_internal(service);
  658                         if (status != VCHIQ_SUCCESS) {
  659                                 vchiq_log_error(vchiq_susp_log_level,
  660                                         "%s: cmd %s returned error %d for "
  661                                         "service %c%c%c%c:%8x",
  662                                         __func__,
  663                                         (cmd == VCHIQ_IOC_USE_SERVICE) ?
  664                                                 "VCHIQ_IOC_USE_SERVICE" :
  665                                                 "VCHIQ_IOC_RELEASE_SERVICE",
  666                                         status,
  667                                         VCHIQ_FOURCC_AS_4CHARS(
  668                                                 service->base.fourcc),
  669                                         service->client_id);
  670                                 ret = -EINVAL;
  671                         }
  672                 } else
  673                         ret = -EINVAL;
  674         } break;
  675 
  676         case VCHIQ_IOC_QUEUE_MESSAGE: {
  677                 VCHIQ_QUEUE_MESSAGE_T args;
  678                 memcpy(&args, (const void*)arg, sizeof(args));
  679 
  680 #ifdef VCHIQ_IOCTL_DEBUG
  681                 printf("%s: [QUEUE MESSAGE] handle = %08x\n", __func__, args.handle);
  682 #endif
  683 
  684                 service = find_service_for_instance(instance, args.handle);
  685 
  686                 if ((service != NULL) && (args.count <= MAX_ELEMENTS)) {
  687                         /* Copy elements into kernel space */
  688                         VCHIQ_ELEMENT_T elements[MAX_ELEMENTS];
  689                         if (copy_from_user(elements, args.elements,
  690                                 args.count * sizeof(VCHIQ_ELEMENT_T)) == 0)
  691                                 status = vchiq_queue_message
  692                                         (args.handle,
  693                                         elements, args.count);
  694                         else
  695                                 ret = -EFAULT;
  696                 } else {
  697                         ret = -EINVAL;
  698                 }
  699         } break;
  700 
  701         case VCHIQ_IOC_QUEUE_BULK_TRANSMIT:
  702         case VCHIQ_IOC_QUEUE_BULK_RECEIVE: {
  703                 VCHIQ_QUEUE_BULK_TRANSFER_T args;
  704                 struct bulk_waiter_node *waiter = NULL;
  705                 VCHIQ_BULK_DIR_T dir =
  706                         (cmd == VCHIQ_IOC_QUEUE_BULK_TRANSMIT) ?
  707                         VCHIQ_BULK_TRANSMIT : VCHIQ_BULK_RECEIVE;
  708 
  709                 memcpy(&args, (const void*)arg, sizeof(args));
  710 
  711                 service = find_service_for_instance(instance, args.handle);
  712                 if (!service) {
  713                         ret = -EINVAL;
  714                         break;
  715                 }
  716 
  717                 if (args.mode == VCHIQ_BULK_MODE_BLOCKING) {
  718                         waiter = kzalloc(sizeof(struct bulk_waiter_node),
  719                                 GFP_KERNEL);
  720                         if (!waiter) {
  721                                 ret = -ENOMEM;
  722                                 break;
  723                         }
  724                         args.userdata = &waiter->bulk_waiter;
  725                 } else if (args.mode == VCHIQ_BULK_MODE_WAITING) {
  726                         struct list_head *pos;
  727                         lmutex_lock(&instance->bulk_waiter_list_mutex);
  728                         list_for_each(pos, &instance->bulk_waiter_list) {
  729                                 if (list_entry(pos, struct bulk_waiter_node,
  730                                         list)->pid == current->p_pid) {
  731                                         waiter = list_entry(pos,
  732                                                 struct bulk_waiter_node,
  733                                                 list);
  734                                         list_del(pos);
  735                                         break;
  736                                 }
  737 
  738                         }
  739                         lmutex_unlock(&instance->bulk_waiter_list_mutex);
  740                         if (!waiter) {
  741                                 vchiq_log_error(vchiq_arm_log_level,
  742                                         "no bulk_waiter found for pid %d",
  743                                         current->p_pid);
  744                                 ret = -ESRCH;
  745                                 break;
  746                         }
  747                         vchiq_log_info(vchiq_arm_log_level,
  748                                 "found bulk_waiter %x for pid %d",
  749                                 (unsigned int)waiter, current->p_pid);
  750                         args.userdata = &waiter->bulk_waiter;
  751                 }
  752                 status = vchiq_bulk_transfer
  753                         (args.handle,
  754                          VCHI_MEM_HANDLE_INVALID,
  755                          args.data, args.size,
  756                          args.userdata, args.mode,
  757                          dir);
  758                 if (!waiter)
  759                         break;
  760                 if ((status != VCHIQ_RETRY) || fatal_signal_pending(current) ||
  761                         !waiter->bulk_waiter.bulk) {
  762                         if (waiter->bulk_waiter.bulk) {
  763                                 /* Cancel the signal when the transfer
  764                                 ** completes. */
  765                                 spin_lock(&bulk_waiter_spinlock);
  766                                 waiter->bulk_waiter.bulk->userdata = NULL;
  767                                 spin_unlock(&bulk_waiter_spinlock);
  768                         }
  769                         _sema_destroy(&waiter->bulk_waiter.event);
  770                         kfree(waiter);
  771                 } else {
  772                         const VCHIQ_BULK_MODE_T mode_waiting =
  773                                 VCHIQ_BULK_MODE_WAITING;
  774                         waiter->pid = current->p_pid;
  775                         lmutex_lock(&instance->bulk_waiter_list_mutex);
  776                         list_add(&waiter->list, &instance->bulk_waiter_list);
  777                         lmutex_unlock(&instance->bulk_waiter_list_mutex);
  778                         vchiq_log_info(vchiq_arm_log_level,
  779                                 "saved bulk_waiter %x for pid %d",
  780                                 (unsigned int)waiter, current->p_pid);
  781 
  782                         memcpy((void *)
  783                                 &(((VCHIQ_QUEUE_BULK_TRANSFER_T *)
  784                                         arg)->mode),
  785                                 (const void *)&mode_waiting,
  786                                 sizeof(mode_waiting));
  787                 }
  788         } break;
  789 
  790         case VCHIQ_IOC_AWAIT_COMPLETION: {
  791                 VCHIQ_AWAIT_COMPLETION_T args;
  792                 int count = 0;
  793 
  794                 DEBUG_TRACE(AWAIT_COMPLETION_LINE);
  795                 if (!instance->connected) {
  796                         ret = -ENOTCONN;
  797                         break;
  798                 }
  799 
  800                 memcpy(&args, (const void*)arg, sizeof(args));
  801 
  802                 lmutex_lock(&instance->completion_mutex);
  803 
  804                 DEBUG_TRACE(AWAIT_COMPLETION_LINE);
  805                 while ((instance->completion_remove ==
  806                         instance->completion_insert)
  807                         && !instance->closing) {
  808 
  809                         DEBUG_TRACE(AWAIT_COMPLETION_LINE);
  810                         lmutex_unlock(&instance->completion_mutex);
  811                         rc = down_interruptible(&instance->insert_event);
  812                         lmutex_lock(&instance->completion_mutex);
  813                         if (rc != 0) {
  814                                 DEBUG_TRACE(AWAIT_COMPLETION_LINE);
  815                                 vchiq_log_info(vchiq_arm_log_level,
  816                                         "AWAIT_COMPLETION interrupted");
  817                                 ret = -EINTR;
  818                                 break;
  819                         }
  820                 }
  821                 DEBUG_TRACE(AWAIT_COMPLETION_LINE);
  822 
  823                 if (ret == 0) {
  824                         int msgbufcount = args.msgbufcount;
  825                         int remove;
  826 
  827                         remove = instance->completion_remove;
  828 
  829                         for (count = 0; count < args.count; count++) {
  830                                 VCHIQ_COMPLETION_DATA_T *completion;
  831                                 VCHIQ_SERVICE_T *service1;
  832                                 USER_SERVICE_T *user_service;
  833                                 VCHIQ_HEADER_T *header;
  834 
  835                                 if (remove == instance->completion_insert)
  836                                         break;
  837 
  838                                 completion = &instance->completions[
  839                                         remove & (MAX_COMPLETIONS - 1)];
  840 
  841 
  842                                 /* A read memory barrier is needed to prevent
  843                                 ** the prefetch of a stale completion record
  844                                 */
  845                                 rmb();
  846 
  847                                 service1 = completion->service_userdata;
  848                                 user_service = service1->base.userdata;
  849                                 completion->service_userdata =
  850                                         user_service->userdata;
  851 
  852                                 header = completion->header;
  853                                 if (header) {
  854                                         void __user *msgbuf;
  855                                         int msglen;
  856 
  857                                         msglen = header->size +
  858                                                 sizeof(VCHIQ_HEADER_T);
  859                                         /* This must be a VCHIQ-style service */
  860                                         if (args.msgbufsize < msglen) {
  861                                                 vchiq_log_error(
  862                                                         vchiq_arm_log_level,
  863                                                         "header %x: msgbufsize"
  864                                                         " %x < msglen %x",
  865                                                         (unsigned int)header,
  866                                                         args.msgbufsize,
  867                                                         msglen);
  868                                                 WARN(1, "invalid message "
  869                                                         "size\n");
  870                                                 if (count == 0)
  871                                                         ret = -EMSGSIZE;
  872                                                 break;
  873                                         }
  874                                         if (msgbufcount <= 0)
  875                                                 /* Stall here for lack of a
  876                                                 ** buffer for the message. */
  877                                                 break;
  878                                         /* Get the pointer from user space */
  879                                         msgbufcount--;
  880                                         if (copy_from_user(&msgbuf,
  881                                                 (const void __user *)
  882                                                 &args.msgbufs[msgbufcount],
  883                                                 sizeof(msgbuf)) != 0) {
  884                                                 if (count == 0)
  885                                                         ret = -EFAULT;
  886                                                 break;
  887                                         }
  888 
  889                                         /* Copy the message to user space */
  890                                         if (copy_to_user(msgbuf, header,
  891                                                 msglen) != 0) {
  892                                                 if (count == 0)
  893                                                         ret = -EFAULT;
  894                                                 break;
  895                                         }
  896 
  897                                         /* Now it has been copied, the message
  898                                         ** can be released. */
  899                                         vchiq_release_message(service1->handle,
  900                                                 header);
  901 
  902                                         /* The completion must point to the
  903                                         ** msgbuf. */
  904                                         completion->header = msgbuf;
  905                                 }
  906 
  907                                 if ((completion->reason ==
  908                                         VCHIQ_SERVICE_CLOSED) &&
  909                                         !instance->use_close_delivered)
  910                                         unlock_service(service1);
  911 
  912                                 if (copy_to_user((void __user *)(
  913                                         (size_t)args.buf +
  914                                         count * sizeof(VCHIQ_COMPLETION_DATA_T)),
  915                                         completion,
  916                                         sizeof(VCHIQ_COMPLETION_DATA_T)) != 0) {
  917                                                 if (ret == 0)
  918                                                         ret = -EFAULT;
  919                                         break;
  920                                 }
  921 
  922                                 /* Ensure that the above copy has completed
  923                                 ** before advancing the remove pointer. */
  924                                 mb();
  925 
  926                                 instance->completion_remove = ++remove;
  927                         }
  928 
  929                         if (msgbufcount != args.msgbufcount) {
  930                                 memcpy((void __user *)
  931                                         &((VCHIQ_AWAIT_COMPLETION_T *)arg)->
  932                                                 msgbufcount,
  933                                         &msgbufcount,
  934                                         sizeof(msgbufcount));
  935                         }
  936 
  937                          if (count != args.count)
  938                          {
  939                                 memcpy((void __user *)
  940                                         &((VCHIQ_AWAIT_COMPLETION_T *)arg)->count,
  941                                         &count, sizeof(count));
  942                         }
  943                 }
  944 
  945                 if (count != 0)
  946                         up(&instance->remove_event);
  947 
  948                 if ((ret == 0) && instance->closing)
  949                         ret = -ENOTCONN;
  950                 /* 
  951                  * XXXBSD: ioctl return codes are not negative as in linux, so
  952                  * we can not indicate success with positive number of passed 
  953                  * messages
  954                  */
  955                 if (ret > 0)
  956                         ret = 0;
  957 
  958                 lmutex_unlock(&instance->completion_mutex);
  959                 DEBUG_TRACE(AWAIT_COMPLETION_LINE);
  960         } break;
  961 
  962         case VCHIQ_IOC_DEQUEUE_MESSAGE: {
  963                 VCHIQ_DEQUEUE_MESSAGE_T args;
  964                 USER_SERVICE_T *user_service;
  965                 VCHIQ_HEADER_T *header;
  966 
  967                 DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
  968                 memcpy(&args, (const void*)arg, sizeof(args));
  969                 service = find_service_for_instance(instance, args.handle);
  970                 if (!service) {
  971                         ret = -EINVAL;
  972                         break;
  973                 }
  974                 user_service = (USER_SERVICE_T *)service->base.userdata;
  975                 if (user_service->is_vchi == 0) {
  976                         ret = -EINVAL;
  977                         break;
  978                 }
  979 
  980                 spin_lock(&msg_queue_spinlock);
  981                 if (user_service->msg_remove == user_service->msg_insert) {
  982                         if (!args.blocking) {
  983                                 spin_unlock(&msg_queue_spinlock);
  984                                 DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
  985                                 ret = -EWOULDBLOCK;
  986                                 break;
  987                         }
  988                         user_service->dequeue_pending = 1;
  989                         do {
  990                                 spin_unlock(&msg_queue_spinlock);
  991                                 DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
  992                                 if (down_interruptible(
  993                                         &user_service->insert_event) != 0) {
  994                                         vchiq_log_info(vchiq_arm_log_level,
  995                                                 "DEQUEUE_MESSAGE interrupted");
  996                                         ret = -EINTR;
  997                                         break;
  998                                 }
  999                                 spin_lock(&msg_queue_spinlock);
 1000                         } while (user_service->msg_remove ==
 1001                                 user_service->msg_insert);
 1002 
 1003                         if (ret)
 1004                                 break;
 1005                 }
 1006 
 1007                 BUG_ON((int)(user_service->msg_insert -
 1008                         user_service->msg_remove) < 0);
 1009 
 1010                 header = user_service->msg_queue[user_service->msg_remove &
 1011                         (MSG_QUEUE_SIZE - 1)];
 1012                 user_service->msg_remove++;
 1013                 spin_unlock(&msg_queue_spinlock);
 1014 
 1015                 up(&user_service->remove_event);
 1016                 if (header == NULL)
 1017                         ret = -ENOTCONN;
 1018                 else if (header->size <= args.bufsize) {
 1019                         /* Copy to user space if msgbuf is not NULL */
 1020                         if ((args.buf == NULL) ||
 1021                                 (copy_to_user((void __user *)args.buf,
 1022                                 header->data,
 1023                                 header->size) == 0)) {
 1024                                 args.bufsize = header->size;
 1025                                 memcpy((void *)arg, &args,
 1026                                     sizeof(args));
 1027                                 vchiq_release_message(
 1028                                         service->handle,
 1029                                         header);
 1030                         } else
 1031                                 ret = -EFAULT;
 1032                 } else {
 1033                         vchiq_log_error(vchiq_arm_log_level,
 1034                                 "header %x: bufsize %x < size %x",
 1035                                 (unsigned int)header, args.bufsize,
 1036                                 header->size);
 1037                         WARN(1, "invalid size\n");
 1038                         ret = -EMSGSIZE;
 1039                 }
 1040                 DEBUG_TRACE(DEQUEUE_MESSAGE_LINE);
 1041         } break;
 1042 
 1043         case VCHIQ_IOC_GET_CLIENT_ID: {
 1044                 VCHIQ_SERVICE_HANDLE_T handle;
 1045 
 1046                 memcpy(&handle, (const void*)arg, sizeof(handle));
 1047 
 1048                 ret = vchiq_get_client_id(handle);
 1049         } break;
 1050 
 1051         case VCHIQ_IOC_GET_CONFIG: {
 1052                 VCHIQ_GET_CONFIG_T args;
 1053                 VCHIQ_CONFIG_T config;
 1054 
 1055                 memcpy(&args, (const void*)arg, sizeof(args));
 1056                 if (args.config_size > sizeof(config)) {
 1057                         ret = -EINVAL;
 1058                         break;
 1059                 }
 1060                 status = vchiq_get_config(instance, args.config_size, &config);
 1061                 if (status == VCHIQ_SUCCESS) {
 1062                         if (copy_to_user((void __user *)args.pconfig,
 1063                                     &config, args.config_size) != 0) {
 1064                                 ret = -EFAULT;
 1065                                 break;
 1066                         }
 1067                 }
 1068         } break;
 1069 
 1070         case VCHIQ_IOC_SET_SERVICE_OPTION: {
 1071                 VCHIQ_SET_SERVICE_OPTION_T args;
 1072 
 1073                 memcpy(&args, (const void*)arg, sizeof(args));
 1074 
 1075                 service = find_service_for_instance(instance, args.handle);
 1076                 if (!service) {
 1077                         ret = -EINVAL;
 1078                         break;
 1079                 }
 1080 
 1081                 status = vchiq_set_service_option(
 1082                                 args.handle, args.option, args.value);
 1083         } break;
 1084 
 1085         case VCHIQ_IOC_DUMP_PHYS_MEM: {
 1086                 VCHIQ_DUMP_MEM_T  args;
 1087 
 1088                 memcpy(&args, (const void*)arg, sizeof(args));
 1089                 printf("IMPLEMENT ME: %s:%d\n", __FILE__, __LINE__);
 1090 #if 0
 1091                 dump_phys_mem(args.virt_addr, args.num_bytes);
 1092 #endif
 1093         } break;
 1094 
 1095         case VCHIQ_IOC_LIB_VERSION: {
 1096                 unsigned int lib_version = (unsigned int)arg;
 1097 
 1098                 if (lib_version < VCHIQ_VERSION_MIN)
 1099                         ret = -EINVAL;
 1100                 else if (lib_version >= VCHIQ_VERSION_CLOSE_DELIVERED)
 1101                         instance->use_close_delivered = 1;
 1102         } break;
 1103 
 1104         case VCHIQ_IOC_CLOSE_DELIVERED: {
 1105                 VCHIQ_SERVICE_HANDLE_T handle;
 1106                 memcpy(&handle, (const void*)arg, sizeof(handle));
 1107 
 1108                 service = find_closed_service_for_instance(instance, handle);
 1109                 if (service != NULL) {
 1110                         USER_SERVICE_T *user_service =
 1111                                 (USER_SERVICE_T *)service->base.userdata;
 1112                         close_delivered(user_service);
 1113                 }
 1114                 else
 1115                         ret = -EINVAL;
 1116         } break;
 1117 
 1118         default:
 1119                 ret = -ENOTTY;
 1120                 break;
 1121         }
 1122 
 1123         if (service)
 1124                 unlock_service(service);
 1125 
 1126         if (ret == 0) {
 1127                 if (status == VCHIQ_ERROR)
 1128                         ret = -EIO;
 1129                 else if (status == VCHIQ_RETRY)
 1130                         ret = -EINTR;
 1131         }
 1132 
 1133         if ((status == VCHIQ_SUCCESS) && (ret < 0) && (ret != -EINTR) &&
 1134                 (ret != -EWOULDBLOCK))
 1135                 vchiq_log_info(vchiq_arm_log_level,
 1136                         "  ioctl instance %lx, cmd %s -> status %d, %d",
 1137                         (unsigned long)instance,
 1138                         (_IOC_NR(cmd) <= VCHIQ_IOC_MAX) ?
 1139                                 ioctl_names[_IOC_NR(cmd)] :
 1140                                 "<invalid>",
 1141                         status, ret);
 1142         else
 1143                 vchiq_log_trace(vchiq_arm_log_level,
 1144                         "  ioctl instance %lx, cmd %s -> status %d, %d",
 1145                         (unsigned long)instance,
 1146                         (_IOC_NR(cmd) <= VCHIQ_IOC_MAX) ?
 1147                                 ioctl_names[_IOC_NR(cmd)] :
 1148                                 "<invalid>",
 1149                         status, ret);
 1150 
 1151         /* XXXBSD: report BSD-style error to userland */
 1152         if (ret < 0)
 1153                 ret = -ret;
 1154 
 1155         return ret;
 1156 }
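/*
 * Error-code convention: the ioctl handlers above keep the Linux-style
 * negative errno values of the upstream driver, and a VCHIQ_ERROR or
 * VCHIQ_RETRY status is folded into -EIO or -EINTR just before returning.
 * The final negation converts the result into the positive errno expected
 * from a FreeBSD cdevsw d_ioctl handler.
 */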
 1157 
 1158 static void
 1159 instance_dtr(void *data)
 1160 {
 1161 
 1162         kfree(data);
 1163 }
 1164 
 1165 /****************************************************************************
 1166 *
 1167 *   vchiq_open
 1168 *
 1169 ***************************************************************************/
 1170 
 1171 static int
 1172 vchiq_open(struct cdev *dev, int flags, int fmt __unused, struct thread *td)
 1173 {
 1174         vchiq_log_info(vchiq_arm_log_level, "vchiq_open");
 1175         /* XXXBSD: do we really need this check? */
 1176         if (1) {
 1177                 VCHIQ_STATE_T *state = vchiq_get_state();
 1178                 VCHIQ_INSTANCE_T instance;
 1179 
 1180                 if (!state) {
 1181                         vchiq_log_error(vchiq_arm_log_level,
 1182                                 "vchiq has no connection to VideoCore");
 1183                         return -ENOTCONN;
 1184                 }
 1185 
 1186                 instance = kmalloc(sizeof(*instance), GFP_KERNEL);
 1187                 if (!instance)
 1188                         return -ENOMEM;
 1189 
 1190                 instance->state = state;
 1191                 /* XXXBSD: PID or thread ID? */
 1192                 instance->pid = td->td_proc->p_pid;
 1193 
 1194 #ifdef notyet
 1195                 ret = vchiq_proc_add_instance(instance);
 1196                 if (ret != 0) {
 1197                         kfree(instance);
 1198                         return ret;
 1199                 }
 1200 #endif
 1201 
 1202                 _sema_init(&instance->insert_event, 0);
 1203                 _sema_init(&instance->remove_event, 0);
 1204                 lmutex_init(&instance->completion_mutex);
 1205                 lmutex_init(&instance->bulk_waiter_list_mutex);
 1206                 INIT_LIST_HEAD(&instance->bulk_waiter_list);
 1207 
 1208                 devfs_set_cdevpriv(instance, instance_dtr);
 1209         } 
 1210         else {
 1211                 vchiq_log_error(vchiq_arm_log_level,
 1212                         "Unknown minor device");
 1213                 return -ENXIO;
 1214         }
 1215 
 1216         return 0;
 1217 }
 1218 
 1219 /****************************************************************************
 1220 *
 1221 *   vchiq_release
 1221  *   (implemented below as vchiq_close in this port)
 1222 *
 1223 ***************************************************************************/
 1224 
 1225 static int
 1226 vchiq_close(struct cdev *dev, int flags __unused, int fmt __unused,
 1227                 struct thread *td)
 1228 {
 1229         int ret = 0;
 1230         if (1) {
 1231                 VCHIQ_INSTANCE_T instance;
 1232                 VCHIQ_STATE_T *state = vchiq_get_state();
 1233                 VCHIQ_SERVICE_T *service;
 1234                 int i;
 1235 
 1236                 if ((ret = devfs_get_cdevpriv((void**)&instance))) {
 1237                         printf("devfs_get_cdevpriv failed: error %d\n", ret);
 1238                         return (ret);
 1239                 }
 1240 
 1241                 vchiq_log_info(vchiq_arm_log_level,
 1242                         "vchiq_release: instance=%lx",
 1243                         (unsigned long)instance);
 1244 
 1245                 if (!state) {
 1246                         ret = -EPERM;
 1247                         goto out;
 1248                 }
 1249 
 1250                 /* Ensure videocore is awake to allow termination. */
 1251                 vchiq_use_internal(instance->state, NULL,
 1252                                 USE_TYPE_VCHIQ);
 1253 
 1254                 lmutex_lock(&instance->completion_mutex);
 1255 
 1256                 /* Wake the completion thread and ask it to exit */
 1257                 instance->closing = 1;
 1258                 up(&instance->insert_event);
 1259 
 1260                 lmutex_unlock(&instance->completion_mutex);
 1261 
 1262                 /* Wake the slot handler if the completion queue is full. */
 1263                 up(&instance->remove_event);
 1264 
 1265                 /* Mark all services for termination... */
 1266                 i = 0;
 1267                 while ((service = next_service_by_instance(state, instance,
 1268                         &i)) != NULL) {
 1269                         USER_SERVICE_T *user_service = service->base.userdata;
 1270 
 1271                         /* Wake the slot handler if the msg queue is full. */
 1272                         up(&user_service->remove_event);
 1273 
 1274                         vchiq_terminate_service_internal(service);
 1275                         unlock_service(service);
 1276                 }
 1277 
 1278                 /* ...and wait for them to die */
 1279                 i = 0;
 1280                 while ((service = next_service_by_instance(state, instance, &i))
 1281                         != NULL) {
 1282                         USER_SERVICE_T *user_service = service->base.userdata;
 1283 
 1284                         down(&service->remove_event);
 1285 
 1286                         BUG_ON(service->srvstate != VCHIQ_SRVSTATE_FREE);
 1287 
 1288                         spin_lock(&msg_queue_spinlock);
 1289 
 1290                         while (user_service->msg_remove !=
 1291                                 user_service->msg_insert) {
 1292                                 VCHIQ_HEADER_T *header = user_service->
 1293                                         msg_queue[user_service->msg_remove &
 1294                                                 (MSG_QUEUE_SIZE - 1)];
 1295                                 user_service->msg_remove++;
 1296                                 spin_unlock(&msg_queue_spinlock);
 1297 
 1298                                 if (header)
 1299                                         vchiq_release_message(
 1300                                                 service->handle,
 1301                                                 header);
 1302                                 spin_lock(&msg_queue_spinlock);
 1303                         }
 1304 
 1305                         spin_unlock(&msg_queue_spinlock);
 1306 
 1307                         unlock_service(service);
 1308                 }
 1309 
 1310                 /* Release any closed services */
 1311                 while (instance->completion_remove !=
 1312                         instance->completion_insert) {
 1313                         VCHIQ_COMPLETION_DATA_T *completion;
 1314                         VCHIQ_SERVICE_T *service1;
 1315                         completion = &instance->completions[
 1316                                 instance->completion_remove &
 1317                                 (MAX_COMPLETIONS - 1)];
 1318                         service1 = completion->service_userdata;
 1319                         if (completion->reason == VCHIQ_SERVICE_CLOSED)
 1320                         {
 1321                                 USER_SERVICE_T *user_service =
 1322                                         service1->base.userdata;
 1323 
 1324                                 /* Wake any blocked user-thread */
 1325                                 if (instance->use_close_delivered)
 1326                                         up(&user_service->close_event);
 1327                                 unlock_service(service1);
 1328                         }
 1329                         instance->completion_remove++;
 1330                 }
 1331 
 1332                 /* Release the PEER service count. */
 1333                 vchiq_release_internal(instance->state, NULL);
 1334 
 1335                 {
 1336                         struct list_head *pos, *next;
 1337                         list_for_each_safe(pos, next,
 1338                                 &instance->bulk_waiter_list) {
 1339                                 struct bulk_waiter_node *waiter;
 1340                                 waiter = list_entry(pos,
 1341                                         struct bulk_waiter_node,
 1342                                         list);
 1343                                 list_del(pos);
 1344                                 vchiq_log_info(vchiq_arm_log_level,
 1345                                         "bulk_waiter - cleaned up %x "
 1346                                         "for pid %d",
 1347                                         (unsigned int)waiter, waiter->pid);
 1348                                 _sema_destroy(&waiter->bulk_waiter.event);
 1349                                 kfree(waiter);
 1350                         }
 1351                 }
 1352 
 1353         }
 1354         else {
 1355                 vchiq_log_error(vchiq_arm_log_level,
 1356                         "Unknown minor device");
 1357                 ret = -ENXIO;
 1358         }
 1359 
 1360 out:
 1361         return ret;
 1362 }
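
/*
 * Illustrative sketch (standalone demo, not part of vchiq_arm.c): the message
 * and completion queues drained in vchiq_close() above are power-of-two sized
 * rings indexed by free-running insert/remove counters, so
 * "counter & (SIZE - 1)" selects the slot and "insert - remove" is the queue
 * depth even after the counters wrap.  A minimal userland rendering:
 */
#include <stdio.h>

#define DEMO_QUEUE_SIZE 128	/* must be a power of two, like MSG_QUEUE_SIZE */

int
main(void)
{
	unsigned int msg_insert = 0, msg_remove = 0;
	int queue[DEMO_QUEUE_SIZE];

	/* producer side: store at the masked insert index, then advance */
	queue[msg_insert & (DEMO_QUEUE_SIZE - 1)] = 42;
	msg_insert++;

	/* consumer side: the ring is non-empty while the counters differ */
	while (msg_remove != msg_insert) {
		int item = queue[msg_remove & (DEMO_QUEUE_SIZE - 1)];

		msg_remove++;
		printf("dequeued %d, %u entries left\n", item,
		    msg_insert - msg_remove);
	}
	return (0);
}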
 1363 
 1364 /****************************************************************************
 1365 *
 1366 *   vchiq_dump
 1367 *
 1368 ***************************************************************************/
 1369 
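/*
 * Descriptive note (added commentary, not in the original source): the dump
 * context carries a destination buffer plus three cursors - "offset" skips
 * bytes already returned by earlier reads, "space" bounds how much may still
 * be copied, and "actual" accumulates the bytes written so far.  Each line of
 * dump output arrives as a separate vchiq_dump() call with the terminating
 * NUL counted in "len", which lets the NUL be rewritten as a line break.
 */
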
 1370 void
 1371 vchiq_dump(void *dump_context, const char *str, int len)
 1372 {
 1373         DUMP_CONTEXT_T *context = (DUMP_CONTEXT_T *)dump_context;
 1374 
 1375         if (context->actual < context->space) {
 1376                 int copy_bytes;
 1377                 if (context->offset > 0) {
 1378                         int skip_bytes = min(len, (int)context->offset);
 1379                         str += skip_bytes;
 1380                         len -= skip_bytes;
 1381                         context->offset -= skip_bytes;
 1382                         if (context->offset > 0)
 1383                                 return;
 1384                 }
 1385                 copy_bytes = min(len, (int)(context->space - context->actual));
 1386                 if (copy_bytes == 0)
 1387                         return;
 1388                 memcpy(context->buf + context->actual, str, copy_bytes);
 1389                 context->actual += copy_bytes;
 1390                 len -= copy_bytes;
 1391 
 1392                 /* If the terminating NUL is included in the length, then it
 1393                 ** marks the end of a line and should be replaced with a
 1394                 ** newline. */
 1395                 if ((len == 0) && (str[copy_bytes - 1] == '\0')) {
 1396                         char cr = '\n';
 1397                         memcpy(context->buf + context->actual - 1, &cr, 1);
 1398                 }
 1399         }
 1400 }
 1401 
 1402 /****************************************************************************
 1403 *
 1404 *   vchiq_dump_platform_instance_state
 1405 *
 1406 ***************************************************************************/
 1407 
 1408 void
 1409 vchiq_dump_platform_instances(void *dump_context)
 1410 {
 1411         VCHIQ_STATE_T *state = vchiq_get_state();
 1412         char buf[80];
 1413         int len;
 1414         int i;
 1415 
 1416         /* There is no list of instances, so instead scan all services,
 1417                 marking those that have been dumped. */
 1418 
 1419         for (i = 0; i < state->unused_service; i++) {
 1420                 VCHIQ_SERVICE_T *service = state->services[i];
 1421                 VCHIQ_INSTANCE_T instance;
 1422 
 1423                 if (service && (service->base.callback == service_callback)) {
 1424                         instance = service->instance;
 1425                         if (instance)
 1426                                 instance->mark = 0;
 1427                 }
 1428         }
 1429 
 1430         for (i = 0; i < state->unused_service; i++) {
 1431                 VCHIQ_SERVICE_T *service = state->services[i];
 1432                 VCHIQ_INSTANCE_T instance;
 1433 
 1434                 if (service && (service->base.callback == service_callback)) {
 1435                         instance = service->instance;
 1436                         if (instance && !instance->mark) {
 1437                                 len = snprintf(buf, sizeof(buf),
 1438                                         "Instance %x: pid %d,%s completions "
 1439                                                 "%d/%d",
 1440                                         (unsigned int)instance, instance->pid,
 1441                                         instance->connected ? " connected, " :
 1442                                                 "",
 1443                                         instance->completion_insert -
 1444                                                 instance->completion_remove,
 1445                                         MAX_COMPLETIONS);
 1446 
 1447                                 vchiq_dump(dump_context, buf, len + 1);
 1448 
 1449                                 instance->mark = 1;
 1450                         }
 1451                 }
 1452         }
 1453 }
 1454 
 1455 /****************************************************************************
 1456 *
 1457 *   vchiq_dump_platform_service_state
 1458 *
 1459 ***************************************************************************/
 1460 
 1461 void
 1462 vchiq_dump_platform_service_state(void *dump_context, VCHIQ_SERVICE_T *service)
 1463 {
 1464         USER_SERVICE_T *user_service = (USER_SERVICE_T *)service->base.userdata;
 1465         char buf[80];
 1466         int len;
 1467 
 1468         len = snprintf(buf, sizeof(buf), "  instance %x",
 1469                 (unsigned int)service->instance);
 1470 
 1471         if ((service->base.callback == service_callback) &&
 1472                 user_service->is_vchi) {
 1473                 len += snprintf(buf + len, sizeof(buf) - len,
 1474                         ", %d/%d messages",
 1475                         user_service->msg_insert - user_service->msg_remove,
 1476                         MSG_QUEUE_SIZE);
 1477 
 1478                 if (user_service->dequeue_pending)
 1479                         len += snprintf(buf + len, sizeof(buf) - len,
 1480                                 " (dequeue pending)");
 1481         }
 1482 
 1483         vchiq_dump(dump_context, buf, len + 1);
 1484 }
 1485 
 1486 #ifdef notyet
 1487 /****************************************************************************
 1488 *
 1489 *   dump_phys_mem
 1490 *
 1491 ***************************************************************************/
 1492 
 1493 static void
 1494 dump_phys_mem(void *virt_addr, uint32_t num_bytes)
 1495 {
 1496         int            rc;
 1497         uint8_t       *end_virt_addr = virt_addr + num_bytes;
 1498         int            num_pages;
 1499         int            offset;
 1500         int            end_offset;
 1501         int            page_idx;
 1502         int            prev_idx;
 1503         struct page   *page;
 1504         struct page  **pages;
 1505         uint8_t       *kmapped_virt_ptr;
 1506 
 1507         /* Align virtAddr and endVirtAddr to 16 byte boundaries. */
 1508 
 1509         virt_addr = (void *)((unsigned long)virt_addr & ~0x0fuL);
 1510         end_virt_addr = (void *)(((unsigned long)end_virt_addr + 15uL) &
 1511                 ~0x0fuL);
 1512 
 1513         offset = (int)(long)virt_addr & (PAGE_SIZE - 1);
 1514         end_offset = (int)(long)end_virt_addr & (PAGE_SIZE - 1);
 1515 
 1516         num_pages = (offset + num_bytes + PAGE_SIZE - 1) / PAGE_SIZE;
 1517 
 1518         pages = kmalloc(sizeof(struct page *) * num_pages, GFP_KERNEL);
 1519         if (pages == NULL) {
 1520                 vchiq_log_error(vchiq_arm_log_level,
 1521                         "Unable to allocate memory for %d pages\n",
 1522                         num_pages);
 1523                 return;
 1524         }
 1525 
 1526         down_read(&current->mm->mmap_sem);
 1527         rc = get_user_pages(current,      /* task */
 1528                 current->mm,              /* mm */
 1529                 (unsigned long)virt_addr, /* start */
 1530                 num_pages,                /* len */
 1531                 0,                        /* write */
 1532                 0,                        /* force */
 1533                 pages,                    /* pages (array of page pointers) */
 1534                 NULL);                    /* vmas */
 1535         up_read(&current->mm->mmap_sem);
 1536 
 1537         prev_idx = -1;
 1538         page = NULL;
 1539 
 1540         while (offset < end_offset) {
 1541 
 1542                 int page_offset = offset % PAGE_SIZE;
 1543                 page_idx = offset / PAGE_SIZE;
 1544 
 1545                 if (page_idx != prev_idx) {
 1546 
 1547                         if (page != NULL)
 1548                                 kunmap(page);
 1549                         page = pages[page_idx];
 1550                         kmapped_virt_ptr = kmap(page);
 1551 
 1552                         prev_idx = page_idx;
 1553                 }
 1554 
 1555                 if (vchiq_arm_log_level >= VCHIQ_LOG_TRACE)
 1556                         vchiq_log_dump_mem("ph",
 1557                                 (uint32_t)(unsigned long)&kmapped_virt_ptr[
 1558                                         page_offset],
 1559                                 &kmapped_virt_ptr[page_offset], 16);
 1560 
 1561                 offset += 16;
 1562         }
 1563         if (page != NULL)
 1564                 kunmap(page);
 1565 
 1566         for (page_idx = 0; page_idx < num_pages; page_idx++)
 1567                 page_cache_release(pages[page_idx]);
 1568 
 1569         kfree(pages);
 1570 }
 1571 
 1572 /****************************************************************************
 1573 *
 1574 *   vchiq_read
 1575 *
 1576 ***************************************************************************/
 1577 
 1578 static ssize_t
 1579 vchiq_read(struct file *file, char __user *buf,
 1580         size_t count, loff_t *ppos)
 1581 {
 1582         DUMP_CONTEXT_T context;
 1583         context.buf = buf;
 1584         context.actual = 0;
 1585         context.space = count;
 1586         context.offset = *ppos;
 1587 
 1588         vchiq_dump_state(&context, &g_state);
 1589 
 1590         *ppos += context.actual;
 1591 
 1592         return context.actual;
 1593 }
 1594 #endif
 1595 
 1596 VCHIQ_STATE_T *
 1597 vchiq_get_state(void)
 1598 {
 1599 
 1600         if (g_state.remote == NULL)
 1601                 printk(KERN_ERR "%s: g_state.remote == NULL\n", __func__);
 1602         else if (g_state.remote->initialised != 1)
 1603                 printk(KERN_NOTICE "%s: g_state.remote->initialised != 1 (%d)\n",
 1604                         __func__, g_state.remote->initialised);
 1605 
 1606         return ((g_state.remote != NULL) &&
 1607                 (g_state.remote->initialised == 1)) ? &g_state : NULL;
 1608 }
 1609 
 1610 /*
 1611  * Autosuspend related functionality
 1612  */
 1613 
 1614 int
 1615 vchiq_videocore_wanted(VCHIQ_STATE_T *state)
 1616 {
 1617         VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
 1618         if (!arm_state)
 1619                 /* autosuspend not supported - always return wanted */
 1620                 return 1;
 1621         else if (arm_state->blocked_count)
 1622                 return 1;
 1623         else if (!arm_state->videocore_use_count) {
 1624                 /* usage count zero - check for override unless we're forcing */
 1625                 if (arm_state->resume_blocked)
 1626                         return 0;
 1627                 else
 1628                         return vchiq_platform_videocore_wanted(state);
 1629         } else
 1630                 /* non-zero usage count - videocore still required */
 1631                 return 1;
 1632 }
 1633 
 1634 static VCHIQ_STATUS_T
 1635 vchiq_keepalive_vchiq_callback(VCHIQ_REASON_T reason,
 1636         VCHIQ_HEADER_T *header,
 1637         VCHIQ_SERVICE_HANDLE_T service_user,
 1638         void *bulk_user)
 1639 {
 1640         vchiq_log_error(vchiq_susp_log_level,
 1641                 "%s callback reason %d", __func__, reason);
 1642         return 0;
 1643 }
 1644 
 1645 static int
 1646 vchiq_keepalive_thread_func(void *v)
 1647 {
 1648         VCHIQ_STATE_T *state = (VCHIQ_STATE_T *) v;
 1649         VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
 1650 
 1651         VCHIQ_STATUS_T status;
 1652         VCHIQ_INSTANCE_T instance;
 1653         VCHIQ_SERVICE_HANDLE_T ka_handle;
 1654 
 1655         VCHIQ_SERVICE_PARAMS_T params = {
 1656                 .fourcc      = VCHIQ_MAKE_FOURCC('K', 'E', 'E', 'P'),
 1657                 .callback    = vchiq_keepalive_vchiq_callback,
 1658                 .version     = KEEPALIVE_VER,
 1659                 .version_min = KEEPALIVE_VER_MIN
 1660         };
 1661 
 1662         status = vchiq_initialise(&instance);
 1663         if (status != VCHIQ_SUCCESS) {
 1664                 vchiq_log_error(vchiq_susp_log_level,
 1665                         "%s vchiq_initialise failed %d", __func__, status);
 1666                 goto exit;
 1667         }
 1668 
 1669         status = vchiq_connect(instance);
 1670         if (status != VCHIQ_SUCCESS) {
 1671                 vchiq_log_error(vchiq_susp_log_level,
 1672                         "%s vchiq_connect failed %d", __func__, status);
 1673                 goto shutdown;
 1674         }
 1675 
 1676         status = vchiq_add_service(instance, &params, &ka_handle);
 1677         if (status != VCHIQ_SUCCESS) {
 1678                 vchiq_log_error(vchiq_susp_log_level,
 1679                         "%s vchiq_open_service failed %d", __func__, status);
 1680                 goto shutdown;
 1681         }
 1682 
 1683         while (1) {
 1684                 long rc = 0, uc = 0;
 1685                 if (wait_for_completion_interruptible(&arm_state->ka_evt)
 1686                                 != 0) {
 1687                         vchiq_log_error(vchiq_susp_log_level,
 1688                                 "%s interrupted", __func__);
 1689                         flush_signals(current);
 1690                         continue;
 1691                 }
 1692 
 1693                 /* read and clear counters.  Do release_count then use_count to
 1694                  * prevent getting more releases than uses */
 1695                 rc = atomic_xchg(&arm_state->ka_release_count, 0);
 1696                 uc = atomic_xchg(&arm_state->ka_use_count, 0);
 1697 
 1698                 /* Call use/release service the requisite number of times.
 1699                  * Process use before release so use counts don't go negative */
 1700                 while (uc--) {
 1701                         atomic_inc(&arm_state->ka_use_ack_count);
 1702                         status = vchiq_use_service(ka_handle);
 1703                         if (status != VCHIQ_SUCCESS) {
 1704                                 vchiq_log_error(vchiq_susp_log_level,
 1705                                         "%s vchiq_use_service error %d",
 1706                                         __func__, status);
 1707                         }
 1708                 }
 1709                 while (rc--) {
 1710                         status = vchiq_release_service(ka_handle);
 1711                         if (status != VCHIQ_SUCCESS) {
 1712                                 vchiq_log_error(vchiq_susp_log_level,
 1713                                         "%s vchiq_release_service error %d",
 1714                                         __func__, status);
 1715                         }
 1716                 }
 1717         }
 1718 
 1719 shutdown:
 1720         vchiq_shutdown(instance);
 1721 exit:
 1722         return 0;
 1723 }
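
/*
 * Illustrative sketch (standalone C11 rendering, not driver code): the
 * keepalive thread above drains its counters with an exchange-to-zero,
 * reading the release count *before* the use count so a concurrent
 * use+release pair is never seen as a release without its matching use, and
 * then forwards the uses before the releases so the count cannot go negative.
 */
#include <stdatomic.h>
#include <stdio.h>

static atomic_long demo_use_count;
static atomic_long demo_release_count;

static void
drain_counters(void)
{
	/* release count first, then use count - same order as the driver */
	long rc = atomic_exchange(&demo_release_count, 0);
	long uc = atomic_exchange(&demo_use_count, 0);

	printf("forward %ld use(s), then %ld release(s)\n", uc, rc);
}

int
main(void)
{
	atomic_fetch_add(&demo_use_count, 2);
	atomic_fetch_add(&demo_release_count, 1);
	drain_counters();
	return (0);
}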
 1724 
 1725 VCHIQ_STATUS_T
 1726 vchiq_arm_init_state(VCHIQ_STATE_T *state, VCHIQ_ARM_STATE_T *arm_state)
 1727 {
 1728         VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
 1729 
 1730         if (arm_state) {
 1731                 rwlock_init(&arm_state->susp_res_lock);
 1732 
 1733                 init_completion(&arm_state->ka_evt);
 1734                 atomic_set(&arm_state->ka_use_count, 0);
 1735                 atomic_set(&arm_state->ka_use_ack_count, 0);
 1736                 atomic_set(&arm_state->ka_release_count, 0);
 1737 
 1738                 init_completion(&arm_state->vc_suspend_complete);
 1739 
 1740                 init_completion(&arm_state->vc_resume_complete);
 1741                 /* Initialise to 'done' state.  We only want to block on resume
 1742                  * completion while videocore is suspended. */
 1743                 set_resume_state(arm_state, VC_RESUME_RESUMED);
 1744 
 1745                 init_completion(&arm_state->resume_blocker);
 1746                 /* Initialise to 'done' state.  We only want to block on this
 1747                  * completion while resume is blocked */
 1748                 complete_all(&arm_state->resume_blocker);
 1749 
 1750                 init_completion(&arm_state->blocked_blocker);
 1751                 /* Initialise to 'done' state.  We only want to block on this
 1752                  * completion while things are waiting on the resume blocker */
 1753                 complete_all(&arm_state->blocked_blocker);
 1754 
 1755                 arm_state->suspend_timer_timeout = SUSPEND_TIMER_TIMEOUT_MS;
 1756                 arm_state->suspend_timer_running = 0;
 1757                 vchiq_init_timer(&arm_state->suspend_timer);
 1758                 arm_state->suspend_timer.data = (unsigned long)(state);
 1759                 arm_state->suspend_timer.function = suspend_timer_callback;
 1760 
 1761                 arm_state->first_connect = 0;
 1762 
 1763         }
 1764         return status;
 1765 }
 1766 
 1767 /*
 1768 ** Functions to modify the state variables;
 1769 **      set_suspend_state
 1770 **      set_resume_state
 1771 **
 1772 ** There are more state variables than we might like, so ensure they remain in
 1773 ** step.  Suspend and resume state are maintained separately, since most of
 1774 ** these state machines can operate independently.  However, there are a few
 1775 ** states where state transitions in one state machine cause a reset to the
 1776 ** other state machine.  In addition, there are some completion events which
 1777 ** need to occur on state machine reset and end-state(s), so these are also
 1778 ** dealt with in these functions.
 1779 **
 1780 ** In all states we set the state variable according to the input, but in some
 1781 ** cases we perform additional steps outlined below;
 1782 **
 1783 ** VC_SUSPEND_IDLE - Initialise the suspend completion at the same time.
 1784 **                      The suspend completion is completed after any suspend
 1785 **                      attempt.  When we reset the state machine we also reset
 1786 **                      the completion.  This reset occurs when videocore is
 1787 **                      resumed, and also if we initiate suspend after a suspend
 1788 **                      failure.
 1789 **
 1790 ** VC_SUSPEND_IN_PROGRESS - This state is considered the point of no return for
 1791 **                      suspend - ie from this point on we must try to suspend
 1792 **                      before resuming can occur.  We therefore also reset the
 1793 **                      resume state machine to VC_RESUME_IDLE in this state.
 1794 **
 1795 ** VC_SUSPEND_SUSPENDED - Suspend has completed successfully. Also call
 1796 **                      complete_all on the suspend completion to notify
 1797 **                      anything waiting for suspend to happen.
 1798 **
 1799 ** VC_SUSPEND_REJECTED - Videocore rejected suspend. Videocore will also
 1800 **                      initiate resume, so no need to alter resume state.
 1801 **                      We call complete_all on the suspend completion to notify
 1802 **                      of suspend rejection.
 1803 **
 1804 ** VC_SUSPEND_FAILED - We failed to initiate videocore suspend.  We notify the
 1805 **                      suspend completion and reset the resume state machine.
 1806 **
 1807 ** VC_RESUME_IDLE - Initialise the resume completion at the same time.  The
 1808 **                      resume completion is in its 'done' state whenever
 1809 **                      videocore is running.  Therefore, the VC_RESUME_IDLE state
 1810 **                      implies that videocore is suspended.
 1811 **                      Hence, any thread which needs to wait until videocore is
 1812 **                      running can wait on this completion - it will only block
 1813 **                      if videocore is suspended.
 1814 **
 1815 ** VC_RESUME_RESUMED - Resume has completed successfully.  Videocore is running.
 1816 **                      Call complete_all on the resume completion to unblock
 1817 **                      any threads waiting for resume.  Also reset the suspend
 1818 **                      state machine to its idle state.
 1819 **
 1820 ** VC_RESUME_FAILED - Currently unused - no mechanism to fail resume exists.
 1821 */
 1822 
 1823 void
 1824 set_suspend_state(VCHIQ_ARM_STATE_T *arm_state,
 1825         enum vc_suspend_status new_state)
 1826 {
 1827         /* set the state in all cases */
 1828         arm_state->vc_suspend_state = new_state;
 1829 
 1830         /* state specific additional actions */
 1831         switch (new_state) {
 1832         case VC_SUSPEND_FORCE_CANCELED:
 1833                 complete_all(&arm_state->vc_suspend_complete);
 1834                 break;
 1835         case VC_SUSPEND_REJECTED:
 1836                 complete_all(&arm_state->vc_suspend_complete);
 1837                 break;
 1838         case VC_SUSPEND_FAILED:
 1839                 complete_all(&arm_state->vc_suspend_complete);
 1840                 arm_state->vc_resume_state = VC_RESUME_RESUMED;
 1841                 complete_all(&arm_state->vc_resume_complete);
 1842                 break;
 1843         case VC_SUSPEND_IDLE:
 1844                 /* TODO: reinit_completion */
 1845                 INIT_COMPLETION(arm_state->vc_suspend_complete);
 1846                 break;
 1847         case VC_SUSPEND_REQUESTED:
 1848                 break;
 1849         case VC_SUSPEND_IN_PROGRESS:
 1850                 set_resume_state(arm_state, VC_RESUME_IDLE);
 1851                 break;
 1852         case VC_SUSPEND_SUSPENDED:
 1853                 complete_all(&arm_state->vc_suspend_complete);
 1854                 break;
 1855         default:
 1856                 BUG();
 1857                 break;
 1858         }
 1859 }
 1860 
 1861 void
 1862 set_resume_state(VCHIQ_ARM_STATE_T *arm_state,
 1863         enum vc_resume_status new_state)
 1864 {
 1865         /* set the state in all cases */
 1866         arm_state->vc_resume_state = new_state;
 1867 
 1868         /* state specific additional actions */
 1869         switch (new_state) {
 1870         case VC_RESUME_FAILED:
 1871                 break;
 1872         case VC_RESUME_IDLE:
 1873                 /* TODO: reinit_completion */
 1874                 INIT_COMPLETION(arm_state->vc_resume_complete);
 1875                 break;
 1876         case VC_RESUME_REQUESTED:
 1877                 break;
 1878         case VC_RESUME_IN_PROGRESS:
 1879                 break;
 1880         case VC_RESUME_RESUMED:
 1881                 complete_all(&arm_state->vc_resume_complete);
 1882                 set_suspend_state(arm_state, VC_SUSPEND_IDLE);
 1883                 break;
 1884         default:
 1885                 BUG();
 1886                 break;
 1887         }
 1888 }
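
/*
 * Illustrative note (added commentary, not in the original source): a typical
 * autosuspend cycle drives the two machines above as
 *
 *   VC_SUSPEND_REQUESTED -> VC_SUSPEND_IN_PROGRESS (resume reset to
 *   VC_RESUME_IDLE) -> VC_SUSPEND_SUSPENDED (vc_suspend_complete signalled)
 *
 * and later, on resume,
 *
 *   VC_RESUME_REQUESTED -> VC_RESUME_IN_PROGRESS -> VC_RESUME_RESUMED
 *   (vc_resume_complete signalled, suspend reset to VC_SUSPEND_IDLE)
 *
 * matching the per-state actions in set_suspend_state() and
 * set_resume_state().
 */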
 1889 
 1890 
 1891 /* should be called with the write lock held */
 1892 inline void
 1893 start_suspend_timer(VCHIQ_ARM_STATE_T *arm_state)
 1894 {
 1895         vchiq_del_timer(&arm_state->suspend_timer);
 1896         arm_state->suspend_timer.expires = jiffies +
 1897                 msecs_to_jiffies(arm_state->
 1898                         suspend_timer_timeout);
 1899         vchiq_add_timer(&arm_state->suspend_timer);
 1900         arm_state->suspend_timer_running = 1;
 1901 }
 1902 
 1903 /* should be called with the write lock held */
 1904 static inline void
 1905 stop_suspend_timer(VCHIQ_ARM_STATE_T *arm_state)
 1906 {
 1907         if (arm_state->suspend_timer_running) {
 1908                 vchiq_del_timer(&arm_state->suspend_timer);
 1909                 arm_state->suspend_timer_running = 0;
 1910         }
 1911 }
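
/*
 * Illustrative note (added commentary): both timer helpers assume the caller
 * already holds the susp_res_lock write lock, e.g.
 *
 *	write_lock_bh(&arm_state->susp_res_lock);
 *	stop_suspend_timer(arm_state);
 *	...
 *	write_unlock_bh(&arm_state->susp_res_lock);
 *
 * as vchiq_arm_force_suspend() and vchiq_release_internal() below do.
 */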
 1912 
 1913 static inline int
 1914 need_resume(VCHIQ_STATE_T *state)
 1915 {
 1916         VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
 1917         return (arm_state->vc_suspend_state > VC_SUSPEND_IDLE) &&
 1918                         (arm_state->vc_resume_state < VC_RESUME_REQUESTED) &&
 1919                         vchiq_videocore_wanted(state);
 1920 }
 1921 
 1922 static int
 1923 block_resume(VCHIQ_ARM_STATE_T *arm_state)
 1924 {
 1925         int status = VCHIQ_SUCCESS;
 1926         const unsigned long timeout_val =
 1927                                 msecs_to_jiffies(FORCE_SUSPEND_TIMEOUT_MS);
 1928         int resume_count = 0;
 1929 
 1930         /* Allow any threads which were blocked by the last force suspend to
 1931          * complete if they haven't already.  Only give this one shot; if
 1932          * blocked_count is incremented after blocked_blocker is completed
 1933          * (which only happens when blocked_count hits 0) then those threads
 1934          * will have to wait until next time around */
 1935         if (arm_state->blocked_count) {
 1936                 /* TODO: reinit_completion */
 1937                 INIT_COMPLETION(arm_state->blocked_blocker);
 1938                 write_unlock_bh(&arm_state->susp_res_lock);
 1939                 vchiq_log_info(vchiq_susp_log_level, "%s wait for previously "
 1940                         "blocked clients", __func__);
 1941                 if (wait_for_completion_interruptible_timeout(
 1942                                 &arm_state->blocked_blocker, timeout_val)
 1943                                         <= 0) {
 1944                         vchiq_log_error(vchiq_susp_log_level, "%s wait for "
 1945                                 "previously blocked clients failed" , __func__);
 1946                         status = VCHIQ_ERROR;
 1947                         write_lock_bh(&arm_state->susp_res_lock);
 1948                         goto out;
 1949                 }
 1950                 vchiq_log_info(vchiq_susp_log_level, "%s previously blocked "
 1951                         "clients resumed", __func__);
 1952                 write_lock_bh(&arm_state->susp_res_lock);
 1953         }
 1954 
 1955         /* We need to wait for resume to complete if it's in progress */
 1956         while (arm_state->vc_resume_state != VC_RESUME_RESUMED &&
 1957                         arm_state->vc_resume_state > VC_RESUME_IDLE) {
 1958                 if (resume_count > 1) {
 1959                         status = VCHIQ_ERROR;
 1960                         vchiq_log_error(vchiq_susp_log_level, "%s waited too "
 1961                                 "many times for resume" , __func__);
 1962                         goto out;
 1963                 }
 1964                 write_unlock_bh(&arm_state->susp_res_lock);
 1965                 vchiq_log_info(vchiq_susp_log_level, "%s wait for resume",
 1966                         __func__);
 1967                 if (wait_for_completion_interruptible_timeout(
 1968                                 &arm_state->vc_resume_complete, timeout_val)
 1969                                         <= 0) {
 1970                         vchiq_log_error(vchiq_susp_log_level, "%s wait for "
 1971                                 "resume failed (%s)", __func__,
 1972                                 resume_state_names[arm_state->vc_resume_state +
 1973                                                         VC_RESUME_NUM_OFFSET]);
 1974                         status = VCHIQ_ERROR;
 1975                         write_lock_bh(&arm_state->susp_res_lock);
 1976                         goto out;
 1977                 }
 1978                 vchiq_log_info(vchiq_susp_log_level, "%s resumed", __func__);
 1979                 write_lock_bh(&arm_state->susp_res_lock);
 1980                 resume_count++;
 1981         }
 1982         /* TODO: reinit_completion */
 1983         INIT_COMPLETION(arm_state->resume_blocker);
 1984         arm_state->resume_blocked = 1;
 1985 
 1986 out:
 1987         return status;
 1988 }
 1989 
 1990 static inline void
 1991 unblock_resume(VCHIQ_ARM_STATE_T *arm_state)
 1992 {
 1993         complete_all(&arm_state->resume_blocker);
 1994         arm_state->resume_blocked = 0;
 1995 }
 1996 
 1997 /* Initiate suspend via slot handler. Should be called with the write lock
 1998  * held */
 1999 VCHIQ_STATUS_T
 2000 vchiq_arm_vcsuspend(VCHIQ_STATE_T *state)
 2001 {
 2002         VCHIQ_STATUS_T status = VCHIQ_ERROR;
 2003         VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
 2004 
 2005         if (!arm_state)
 2006                 goto out;
 2007 
 2008         vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
 2009         status = VCHIQ_SUCCESS;
 2010 
 2011 
 2012         switch (arm_state->vc_suspend_state) {
 2013         case VC_SUSPEND_REQUESTED:
 2014                 vchiq_log_info(vchiq_susp_log_level, "%s: suspend already "
 2015                         "requested", __func__);
 2016                 break;
 2017         case VC_SUSPEND_IN_PROGRESS:
 2018                 vchiq_log_info(vchiq_susp_log_level, "%s: suspend already in "
 2019                         "progress", __func__);
 2020                 break;
 2021 
 2022         default:
 2023                 /* We don't expect to be in other states, so log but continue
 2024                  * anyway */
 2025                 vchiq_log_error(vchiq_susp_log_level,
 2026                         "%s unexpected suspend state %s", __func__,
 2027                         suspend_state_names[arm_state->vc_suspend_state +
 2028                                                 VC_SUSPEND_NUM_OFFSET]);
 2029                 /* fall through */
 2030         case VC_SUSPEND_REJECTED:
 2031         case VC_SUSPEND_FAILED:
 2032                 /* Ensure any idle state actions have been run */
 2033                 set_suspend_state(arm_state, VC_SUSPEND_IDLE);
 2034                 /* fall through */
 2035         case VC_SUSPEND_IDLE:
 2036                 vchiq_log_info(vchiq_susp_log_level,
 2037                         "%s: suspending", __func__);
 2038                 set_suspend_state(arm_state, VC_SUSPEND_REQUESTED);
 2039                 /* kick the slot handler thread to initiate suspend */
 2040                 request_poll(state, NULL, 0);
 2041                 break;
 2042         }
 2043 
 2044 out:
 2045         vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, status);
 2046         return status;
 2047 }
 2048 
 2049 void
 2050 vchiq_platform_check_suspend(VCHIQ_STATE_T *state)
 2051 {
 2052         VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
 2053         int susp = 0;
 2054 
 2055         if (!arm_state)
 2056                 goto out;
 2057 
 2058         vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
 2059 
 2060         write_lock_bh(&arm_state->susp_res_lock);
 2061         if (arm_state->vc_suspend_state == VC_SUSPEND_REQUESTED &&
 2062                         arm_state->vc_resume_state == VC_RESUME_RESUMED) {
 2063                 set_suspend_state(arm_state, VC_SUSPEND_IN_PROGRESS);
 2064                 susp = 1;
 2065         }
 2066         write_unlock_bh(&arm_state->susp_res_lock);
 2067 
 2068         if (susp)
 2069                 vchiq_platform_suspend(state);
 2070 
 2071 out:
 2072         vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);
 2073         return;
 2074 }
 2075 
 2076 
 2077 static void
 2078 output_timeout_error(VCHIQ_STATE_T *state)
 2079 {
 2080         VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
 2081         char service_err[50] = "";
 2082         int vc_use_count = arm_state->videocore_use_count;
 2083         int active_services = state->unused_service;
 2084         int i;
 2085 
 2086         if (!arm_state->videocore_use_count) {
 2087                 snprintf(service_err, 50, " Videocore usecount is 0");
 2088                 goto output_msg;
 2089         }
 2090         for (i = 0; i < active_services; i++) {
 2091                 VCHIQ_SERVICE_T *service_ptr = state->services[i];
 2092                 if (service_ptr && service_ptr->service_use_count &&
 2093                         (service_ptr->srvstate != VCHIQ_SRVSTATE_FREE)) {
 2094                         snprintf(service_err, 50, " %c%c%c%c(%8x) service has "
 2095                                 "use count %d%s", VCHIQ_FOURCC_AS_4CHARS(
 2096                                         service_ptr->base.fourcc),
 2097                                  service_ptr->client_id,
 2098                                  service_ptr->service_use_count,
 2099                                  service_ptr->service_use_count ==
 2100                                          vc_use_count ? "" : " (+ more)");
 2101                         break;
 2102                 }
 2103         }
 2104 
 2105 output_msg:
 2106         vchiq_log_error(vchiq_susp_log_level,
 2107                 "timed out waiting for vc suspend (%d).%s",
 2108                  arm_state->autosuspend_override, service_err);
 2109 
 2110 }
 2111 
 2112 /* Try to get videocore into suspended state, regardless of autosuspend state.
 2113 ** We don't actually force suspend, since videocore may get into a bad state
 2114 ** if we force suspend at a bad time.  Instead, we wait for autosuspend to
 2115 ** determine a good point to suspend.  If this doesn't happen within 100ms we
 2116 ** report failure.
 2117 **
 2118 ** Returns VCHIQ_SUCCESS if videocore suspended successfully, VCHIQ_RETRY if
 2119 ** videocore failed to suspend in time or VCHIQ_ERROR if interrupted.
 2120 */
 2121 VCHIQ_STATUS_T
 2122 vchiq_arm_force_suspend(VCHIQ_STATE_T *state)
 2123 {
 2124         VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
 2125         VCHIQ_STATUS_T status = VCHIQ_ERROR;
 2126         long rc = 0;
 2127         int repeat = -1;
 2128 
 2129         if (!arm_state)
 2130                 goto out;
 2131 
 2132         vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
 2133 
 2134         write_lock_bh(&arm_state->susp_res_lock);
 2135 
 2136         status = block_resume(arm_state);
 2137         if (status != VCHIQ_SUCCESS)
 2138                 goto unlock;
 2139         if (arm_state->vc_suspend_state == VC_SUSPEND_SUSPENDED) {
 2140                 /* Already suspended - just block resume and exit */
 2141                 vchiq_log_info(vchiq_susp_log_level, "%s already suspended",
 2142                         __func__);
 2143                 status = VCHIQ_SUCCESS;
 2144                 goto unlock;
 2145         } else if (arm_state->vc_suspend_state <= VC_SUSPEND_IDLE) {
 2146                 /* initiate suspend immediately in the case that we're waiting
 2147                  * for the timeout */
 2148                 stop_suspend_timer(arm_state);
 2149                 if (!vchiq_videocore_wanted(state)) {
 2150                         vchiq_log_info(vchiq_susp_log_level, "%s videocore "
 2151                                 "idle, initiating suspend", __func__);
 2152                         status = vchiq_arm_vcsuspend(state);
 2153                 } else if (arm_state->autosuspend_override <
 2154                                                 FORCE_SUSPEND_FAIL_MAX) {
 2155                         vchiq_log_info(vchiq_susp_log_level, "%s letting "
 2156                                 "videocore go idle", __func__);
 2157                         status = VCHIQ_SUCCESS;
 2158                 } else {
 2159                         vchiq_log_warning(vchiq_susp_log_level, "%s failed too "
 2160                                 "many times - attempting suspend", __func__);
 2161                         status = vchiq_arm_vcsuspend(state);
 2162                 }
 2163         } else {
 2164                 vchiq_log_info(vchiq_susp_log_level, "%s videocore suspend "
 2165                         "in progress - wait for completion", __func__);
 2166                 status = VCHIQ_SUCCESS;
 2167         }
 2168 
 2169         /* Wait for suspend to happen due to system idle (not forced..) */
 2170         if (status != VCHIQ_SUCCESS)
 2171                 goto unblock_resume;
 2172 
 2173         do {
 2174                 write_unlock_bh(&arm_state->susp_res_lock);
 2175 
 2176                 rc = wait_for_completion_interruptible_timeout(
 2177                                 &arm_state->vc_suspend_complete,
 2178                                 msecs_to_jiffies(FORCE_SUSPEND_TIMEOUT_MS));
 2179 
 2180                 write_lock_bh(&arm_state->susp_res_lock);
 2181                 if (rc < 0) {
 2182                         vchiq_log_warning(vchiq_susp_log_level, "%s "
 2183                                 "interrupted waiting for suspend", __func__);
 2184                         status = VCHIQ_ERROR;
 2185                         goto unblock_resume;
 2186                 } else if (rc == 0) {
 2187                         if (arm_state->vc_suspend_state > VC_SUSPEND_IDLE) {
 2188                                 /* Repeat timeout once if in progress */
 2189                                 if (repeat < 0) {
 2190                                         repeat = 1;
 2191                                         continue;
 2192                                 }
 2193                         }
 2194                         arm_state->autosuspend_override++;
 2195                         output_timeout_error(state);
 2196 
 2197                         status = VCHIQ_RETRY;
 2198                         goto unblock_resume;
 2199                 }
 2200         } while (0 < (repeat--));
 2201 
 2202         /* Check and report state in case we need to abort ARM suspend */
 2203         if (arm_state->vc_suspend_state != VC_SUSPEND_SUSPENDED) {
 2204                 status = VCHIQ_RETRY;
 2205                 vchiq_log_error(vchiq_susp_log_level,
 2206                         "%s videocore suspend failed (state %s)", __func__,
 2207                         suspend_state_names[arm_state->vc_suspend_state +
 2208                                                 VC_SUSPEND_NUM_OFFSET]);
 2209                 /* Reset the state only if it's still in an error state.
 2210                  * Something could have already initiated another suspend. */
 2211                 if (arm_state->vc_suspend_state < VC_SUSPEND_IDLE)
 2212                         set_suspend_state(arm_state, VC_SUSPEND_IDLE);
 2213 
 2214                 goto unblock_resume;
 2215         }
 2216 
 2217         /* successfully suspended - unlock and exit */
 2218         goto unlock;
 2219 
 2220 unblock_resume:
 2221         /* all error states need to unblock resume before exit */
 2222         unblock_resume(arm_state);
 2223 
 2224 unlock:
 2225         write_unlock_bh(&arm_state->susp_res_lock);
 2226 
 2227 out:
 2228         vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, status);
 2229         return status;
 2230 }
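
/*
 * Illustrative sketch (hypothetical caller, not part of this file; assumes
 * the vchiq headers and the errno constants are available): one way a
 * platform suspend path might consume the return values documented above,
 * where VCHIQ_RETRY means videocore did not suspend in time and VCHIQ_ERROR
 * means the wait was interrupted.
 */
static int
example_platform_suspend(VCHIQ_STATE_T *state)
{

	switch (vchiq_arm_force_suspend(state)) {
	case VCHIQ_SUCCESS:
		return (0);		/* videocore is suspended */
	case VCHIQ_RETRY:
		return (EAGAIN);	/* did not suspend in time - retry later */
	default:
		return (EINTR);		/* wait was interrupted */
	}
}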
 2231 
 2232 void
 2233 vchiq_check_suspend(VCHIQ_STATE_T *state)
 2234 {
 2235         VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
 2236 
 2237         if (!arm_state)
 2238                 goto out;
 2239 
 2240         vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
 2241 
 2242         write_lock_bh(&arm_state->susp_res_lock);
 2243         if (arm_state->vc_suspend_state != VC_SUSPEND_SUSPENDED &&
 2244                         arm_state->first_connect &&
 2245                         !vchiq_videocore_wanted(state)) {
 2246                 vchiq_arm_vcsuspend(state);
 2247         }
 2248         write_unlock_bh(&arm_state->susp_res_lock);
 2249 
 2250 out:
 2251         vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);
 2252         return;
 2253 }
 2254 
 2255 
 2256 int
 2257 vchiq_arm_allow_resume(VCHIQ_STATE_T *state)
 2258 {
 2259         VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
 2260         int resume = 0;
 2261         int ret = -1;
 2262 
 2263         if (!arm_state)
 2264                 goto out;
 2265 
 2266         vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
 2267 
 2268         write_lock_bh(&arm_state->susp_res_lock);
 2269         unblock_resume(arm_state);
 2270         resume = vchiq_check_resume(state);
 2271         write_unlock_bh(&arm_state->susp_res_lock);
 2272 
 2273         if (resume) {
 2274                 if (wait_for_completion_interruptible(
 2275                         &arm_state->vc_resume_complete) < 0) {
 2276                         vchiq_log_error(vchiq_susp_log_level,
 2277                                 "%s interrupted", __func__);
 2278                         /* failed, cannot accurately derive suspend
 2279                          * state, so exit early. */
 2280                         goto out;
 2281                 }
 2282         }
 2283 
 2284         read_lock_bh(&arm_state->susp_res_lock);
 2285         if (arm_state->vc_suspend_state == VC_SUSPEND_SUSPENDED) {
 2286                 vchiq_log_info(vchiq_susp_log_level,
 2287                                 "%s: Videocore remains suspended", __func__);
 2288         } else {
 2289                 vchiq_log_info(vchiq_susp_log_level,
 2290                                 "%s: Videocore resumed", __func__);
 2291                 ret = 0;
 2292         }
 2293         read_unlock_bh(&arm_state->susp_res_lock);
 2294 out:
 2295         vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
 2296         return ret;
 2297 }
 2298 
 2299 /* This function should be called with the write lock held */
 2300 int
 2301 vchiq_check_resume(VCHIQ_STATE_T *state)
 2302 {
 2303         VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
 2304         int resume = 0;
 2305 
 2306         if (!arm_state)
 2307                 goto out;
 2308 
 2309         vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
 2310 
 2311         if (need_resume(state)) {
 2312                 set_resume_state(arm_state, VC_RESUME_REQUESTED);
 2313                 request_poll(state, NULL, 0);
 2314                 resume = 1;
 2315         }
 2316 
 2317 out:
 2318         vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);
 2319         return resume;
 2320 }
 2321 
 2322 #ifdef notyet
 2323 void
 2324 vchiq_platform_check_resume(VCHIQ_STATE_T *state)
 2325 {
 2326         VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
 2327         int res = 0;
 2328 
 2329         if (!arm_state)
 2330                 goto out;
 2331 
 2332         vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
 2333 
 2334         write_lock_bh(&arm_state->susp_res_lock);
 2335         if (arm_state->wake_address == 0) {
 2336                 vchiq_log_info(vchiq_susp_log_level,
 2337                                         "%s: already awake", __func__);
 2338                 goto unlock;
 2339         }
 2340         if (arm_state->vc_resume_state == VC_RESUME_IN_PROGRESS) {
 2341                 vchiq_log_info(vchiq_susp_log_level,
 2342                                         "%s: already resuming", __func__);
 2343                 goto unlock;
 2344         }
 2345 
 2346         if (arm_state->vc_resume_state == VC_RESUME_REQUESTED) {
 2347                 set_resume_state(arm_state, VC_RESUME_IN_PROGRESS);
 2348                 res = 1;
 2349         } else
 2350                 vchiq_log_trace(vchiq_susp_log_level,
 2351                                 "%s: not resuming (resume state %s)", __func__,
 2352                                 resume_state_names[arm_state->vc_resume_state +
 2353                                                         VC_RESUME_NUM_OFFSET]);
 2354 
 2355 unlock:
 2356         write_unlock_bh(&arm_state->susp_res_lock);
 2357 
 2358         if (res)
 2359                 vchiq_platform_resume(state);
 2360 
 2361 out:
 2362         vchiq_log_trace(vchiq_susp_log_level, "%s exit", __func__);
 2363         return;
 2364 
 2365 }
 2366 #endif
 2367 
 2368 
 2369 
 2370 VCHIQ_STATUS_T
 2371 vchiq_use_internal(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service,
 2372                 enum USE_TYPE_E use_type)
 2373 {
 2374         VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
 2375         VCHIQ_STATUS_T ret = VCHIQ_SUCCESS;
 2376         char entity[16];
 2377         int *entity_uc;
 2378         int local_uc, local_entity_uc;
 2379 
 2380         if (!arm_state)
 2381                 goto out;
 2382 
 2383         vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
 2384 
 2385         if (use_type == USE_TYPE_VCHIQ) {
 2386                 snprintf(entity, sizeof(entity), "VCHIQ:   ");
 2387                 entity_uc = &arm_state->peer_use_count;
 2388         } else if (service) {
 2389                 snprintf(entity, sizeof(entity), "%c%c%c%c:%8x",
 2390                         VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
 2391                         service->client_id);
 2392                 entity_uc = &service->service_use_count;
 2393         } else {
 2394                 vchiq_log_error(vchiq_susp_log_level, "%s null service "
 2395                                 "ptr", __func__);
 2396                 ret = VCHIQ_ERROR;
 2397                 goto out;
 2398         }
 2399 
 2400         write_lock_bh(&arm_state->susp_res_lock);
 2401         while (arm_state->resume_blocked) {
 2402                 /* If we call 'use' while force suspend is waiting for suspend,
 2403                  * then we're about to block the thread which the force is
 2404                  * waiting to complete, so we're bound to just time out. In this
 2405                  * case, set the suspend state such that the wait will be
 2406                  * canceled, so we can complete as quickly as possible. */
 2407                 if (arm_state->resume_blocked && arm_state->vc_suspend_state ==
 2408                                 VC_SUSPEND_IDLE) {
 2409                         set_suspend_state(arm_state, VC_SUSPEND_FORCE_CANCELED);
 2410                         break;
 2411                 }
 2412                 /* If suspend is already in progress then we need to block */
 2413                 if (!try_wait_for_completion(&arm_state->resume_blocker)) {
 2414                         /* Indicate that there are threads waiting on the resume
 2415                          * blocker.  These need to be allowed to complete before
 2416                          * a _second_ call to force suspend can complete,
 2417                          * otherwise low priority threads might never actually
 2418                          * continue */
 2419                         arm_state->blocked_count++;
 2420                         write_unlock_bh(&arm_state->susp_res_lock);
 2421                         vchiq_log_info(vchiq_susp_log_level, "%s %s resume "
 2422                                 "blocked - waiting...", __func__, entity);
 2423                         if (wait_for_completion_killable(
 2424                                         &arm_state->resume_blocker) != 0) {
 2425                                 vchiq_log_error(vchiq_susp_log_level, "%s %s "
 2426                                         "wait for resume blocker interrupted",
 2427                                         __func__, entity);
 2428                                 ret = VCHIQ_ERROR;
 2429                                 write_lock_bh(&arm_state->susp_res_lock);
 2430                                 arm_state->blocked_count--;
 2431                                 write_unlock_bh(&arm_state->susp_res_lock);
 2432                                 goto out;
 2433                         }
 2434                         vchiq_log_info(vchiq_susp_log_level, "%s %s resume "
 2435                                 "unblocked", __func__, entity);
 2436                         write_lock_bh(&arm_state->susp_res_lock);
 2437                         if (--arm_state->blocked_count == 0)
 2438                                 complete_all(&arm_state->blocked_blocker);
 2439                 }
 2440         }
 2441 
 2442         stop_suspend_timer(arm_state);
 2443 
 2444         local_uc = ++arm_state->videocore_use_count;
 2445         local_entity_uc = ++(*entity_uc);
 2446 
 2447         /* If there's a pending request which hasn't yet been serviced then
 2448          * just clear it.  If we're past VC_SUSPEND_REQUESTED state then
 2449          * vc_resume_complete will block until we either resume or fail to
 2450          * suspend */
 2451         if (arm_state->vc_suspend_state <= VC_SUSPEND_REQUESTED)
 2452                 set_suspend_state(arm_state, VC_SUSPEND_IDLE);
 2453 
 2454         if ((use_type != USE_TYPE_SERVICE_NO_RESUME) && need_resume(state)) {
 2455                 set_resume_state(arm_state, VC_RESUME_REQUESTED);
 2456                 vchiq_log_info(vchiq_susp_log_level,
 2457                         "%s %s count %d, state count %d",
 2458                         __func__, entity, local_entity_uc, local_uc);
 2459                 request_poll(state, NULL, 0);
 2460         } else
 2461                 vchiq_log_trace(vchiq_susp_log_level,
 2462                         "%s %s count %d, state count %d",
 2463                         __func__, entity, *entity_uc, local_uc);
 2464 
 2465 
 2466         write_unlock_bh(&arm_state->susp_res_lock);
 2467 
 2468         /* Completion is in a done state when we're not suspended, so this won't
 2469          * block for the non-suspended case. */
 2470         if (!try_wait_for_completion(&arm_state->vc_resume_complete)) {
 2471                 vchiq_log_info(vchiq_susp_log_level, "%s %s wait for resume",
 2472                         __func__, entity);
 2473                 if (wait_for_completion_killable(
 2474                                 &arm_state->vc_resume_complete) != 0) {
 2475                         vchiq_log_error(vchiq_susp_log_level, "%s %s wait for "
 2476                                 "resume interrupted", __func__, entity);
 2477                         ret = VCHIQ_ERROR;
 2478                         goto out;
 2479                 }
 2480                 vchiq_log_info(vchiq_susp_log_level, "%s %s resumed", __func__,
 2481                         entity);
 2482         }
 2483 
 2484         if (ret == VCHIQ_SUCCESS) {
 2485                 VCHIQ_STATUS_T status = VCHIQ_SUCCESS;
 2486                 long ack_cnt = atomic_xchg(&arm_state->ka_use_ack_count, 0);
 2487                 while (ack_cnt && (status == VCHIQ_SUCCESS)) {
 2488                         /* Send the use notify to videocore */
 2489                         status = vchiq_send_remote_use_active(state);
 2490                         if (status == VCHIQ_SUCCESS)
 2491                                 ack_cnt--;
 2492                         else
 2493                                 atomic_add(ack_cnt,
 2494                                         &arm_state->ka_use_ack_count);
 2495                 }
 2496         }
 2497 
 2498 out:
 2499         vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
 2500         return ret;
 2501 }
 2502 
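      /*
       * Drop one use count for the given service, or for the peer when
       * service is NULL.  Once nothing still wants VideoCore
       * (vchiq_videocore_wanted()), either arm the suspend timer or, if
       * the platform does not use the timer or a forced suspend is
       * already blocking resume, request suspension straight away.
       */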
 2503 VCHIQ_STATUS_T
 2504 vchiq_release_internal(VCHIQ_STATE_T *state, VCHIQ_SERVICE_T *service)
 2505 {
 2506         VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
 2507         VCHIQ_STATUS_T ret = VCHIQ_SUCCESS;
 2508         char entity[16];
 2509         int *entity_uc;
 2510 
 2511         if (!arm_state)
 2512                 goto out;
 2513 
 2514         vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
 2515 
 2516         if (service) {
 2517                 snprintf(entity, sizeof(entity), "%c%c%c%c:%8x",
 2518                         VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
 2519                         service->client_id);
 2520                 entity_uc = &service->service_use_count;
 2521         } else {
 2522                 snprintf(entity, sizeof(entity), "PEER:   ");
 2523                 entity_uc = &arm_state->peer_use_count;
 2524         }
 2525 
 2526         write_lock_bh(&arm_state->susp_res_lock);
 2527         if (!arm_state->videocore_use_count || !(*entity_uc)) {
 2528                 /* Don't use BUG_ON - don't allow user thread to crash kernel */
 2529                 WARN_ON(!arm_state->videocore_use_count);
 2530                 WARN_ON(!(*entity_uc));
 2531                 ret = VCHIQ_ERROR;
 2532                 goto unlock;
 2533         }
 2534         --arm_state->videocore_use_count;
 2535         --(*entity_uc);
 2536 
 2537         if (!vchiq_videocore_wanted(state)) {
 2538                 if (vchiq_platform_use_suspend_timer() &&
 2539                                 !arm_state->resume_blocked) {
 2540                         /* Only use the timer if we're not trying to force
 2541                          * suspend (=> resume_blocked) */
 2542                         start_suspend_timer(arm_state);
 2543                 } else {
 2544                         vchiq_log_info(vchiq_susp_log_level,
 2545                                 "%s %s count %d, state count %d - suspending",
 2546                                 __func__, entity, *entity_uc,
 2547                                 arm_state->videocore_use_count);
 2548                         vchiq_arm_vcsuspend(state);
 2549                 }
 2550         } else
 2551                 vchiq_log_trace(vchiq_susp_log_level,
 2552                         "%s %s count %d, state count %d",
 2553                         __func__, entity, *entity_uc,
 2554                         arm_state->videocore_use_count);
 2555 
 2556 unlock:
 2557         write_unlock_bh(&arm_state->susp_res_lock);
 2558 
 2559 out:
 2560         vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
 2561         return ret;
 2562 }
 2563 
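      /*
       * Notifications from the VideoCore side: each remote "use" or
       * "release" bumps the corresponding keepalive counter and signals
       * ka_evt so the keepalive thread can act on it asynchronously.
       */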
 2564 void
 2565 vchiq_on_remote_use(VCHIQ_STATE_T *state)
 2566 {
 2567         VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
 2568         vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
 2569         atomic_inc(&arm_state->ka_use_count);
 2570         complete(&arm_state->ka_evt);
 2571 }
 2572 
 2573 void
 2574 vchiq_on_remote_release(VCHIQ_STATE_T *state)
 2575 {
 2576         VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
 2577         vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
 2578         atomic_inc(&arm_state->ka_release_count);
 2579         complete(&arm_state->ka_evt);
 2580 }
 2581 
 2582 VCHIQ_STATUS_T
 2583 vchiq_use_service_internal(VCHIQ_SERVICE_T *service)
 2584 {
 2585         return vchiq_use_internal(service->state, service, USE_TYPE_SERVICE);
 2586 }
 2587 
 2588 VCHIQ_STATUS_T
 2589 vchiq_release_service_internal(VCHIQ_SERVICE_T *service)
 2590 {
 2591         return vchiq_release_internal(service->state, service);
 2592 }
 2593 
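      /*
       * Suspend-timer expiry handler, armed from vchiq_release_internal()
       * once VideoCore is no longer wanted; the actual decision to
       * suspend is deferred to vchiq_check_suspend().
       */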
 2594 static void suspend_timer_callback(unsigned long context)
 2595 {
 2596         VCHIQ_STATE_T *state = (VCHIQ_STATE_T *)context;
 2597         VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
 2598         if (!arm_state)
 2599                 goto out;
 2600         vchiq_log_info(vchiq_susp_log_level,
 2601                 "%s - suspend timer expired - check suspend", __func__);
 2602         vchiq_check_suspend(state);
 2603 out:
 2604         return;
 2605 }
 2606 
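      /*
       * Public wrappers: resolve the service handle, take or drop a use
       * count via the internal helpers, and unlock the service again.
       * The _no_resume variant takes the count without forcing an
       * immediate VideoCore resume.
       */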
 2607 VCHIQ_STATUS_T
 2608 vchiq_use_service_no_resume(VCHIQ_SERVICE_HANDLE_T handle)
 2609 {
 2610         VCHIQ_STATUS_T ret = VCHIQ_ERROR;
 2611         VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
 2612         if (service) {
 2613                 ret = vchiq_use_internal(service->state, service,
 2614                                 USE_TYPE_SERVICE_NO_RESUME);
 2615                 unlock_service(service);
 2616         }
 2617         return ret;
 2618 }
 2619 
 2620 VCHIQ_STATUS_T
 2621 vchiq_use_service(VCHIQ_SERVICE_HANDLE_T handle)
 2622 {
 2623         VCHIQ_STATUS_T ret = VCHIQ_ERROR;
 2624         VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
 2625         if (service) {
 2626                 ret = vchiq_use_internal(service->state, service,
 2627                                 USE_TYPE_SERVICE);
 2628                 unlock_service(service);
 2629         }
 2630         return ret;
 2631 }
 2632 
 2633 VCHIQ_STATUS_T
 2634 vchiq_release_service(VCHIQ_SERVICE_HANDLE_T handle)
 2635 {
 2636         VCHIQ_STATUS_T ret = VCHIQ_ERROR;
 2637         VCHIQ_SERVICE_T *service = find_service_by_handle(handle);
 2638         if (service) {
 2639                 ret = vchiq_release_internal(service->state, service);
 2640                 unlock_service(service);
 2641         }
 2642         return ret;
 2643 }
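      /*
       * Illustrative usage sketch (not part of the driver): a client that
       * needs VideoCore awake around a message exchange brackets its work
       * with matching use/release calls, e.g.
       *
       *      if (vchiq_use_service(handle) == VCHIQ_SUCCESS) {
       *              ... queue messages / wait for replies ...
       *              vchiq_release_service(handle);
       *      }
       *
       * Every successful vchiq_use_service() must be balanced by exactly
       * one vchiq_release_service(), otherwise the use counts never reach
       * zero and VideoCore is never allowed to suspend.
       */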
 2644 
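      /*
       * Log a snapshot of per-service use counts (at most 64 entries),
       * flagging the services that are currently preventing suspend,
       * followed by the peer and overall instance counts.
       */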
 2645 void
 2646 vchiq_dump_service_use_state(VCHIQ_STATE_T *state)
 2647 {
 2648         VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
 2649         int i, j = 0;
 2650         /* Only dump 64 services */
 2651         static const int local_max_services = 64;
 2652         /* If there are more than 64 services, only dump ones with
 2653          * non-zero counts */
 2654         int only_nonzero = 0;
 2655         static const char *nz = "<-- preventing suspend";
 2656 
 2657         enum vc_suspend_status vc_suspend_state;
 2658         enum vc_resume_status  vc_resume_state;
 2659         int peer_count;
 2660         int vc_use_count;
 2661         int active_services;
 2662         struct service_data_struct {
 2663                 int fourcc;
 2664                 int clientid;
 2665                 int use_count;
 2666         } service_data[local_max_services];
 2667 
 2668         if (!arm_state)
 2669                 return;
 2670 
 2671         read_lock_bh(&arm_state->susp_res_lock);
 2672         vc_suspend_state = arm_state->vc_suspend_state;
 2673         vc_resume_state  = arm_state->vc_resume_state;
 2674         peer_count = arm_state->peer_use_count;
 2675         vc_use_count = arm_state->videocore_use_count;
 2676         active_services = state->unused_service;
 2677         if (active_services > local_max_services)
 2678                 only_nonzero = 1;
 2679 
 2680         for (i = 0; (i < active_services) && (j < local_max_services); i++) {
 2681                 VCHIQ_SERVICE_T *service_ptr = state->services[i];
 2682                 if (!service_ptr)
 2683                         continue;
 2684 
 2685                 if (only_nonzero && !service_ptr->service_use_count)
 2686                         continue;
 2687 
 2688                 if (service_ptr->srvstate != VCHIQ_SRVSTATE_FREE) {
 2689                         service_data[j].fourcc = service_ptr->base.fourcc;
 2690                         service_data[j].clientid = service_ptr->client_id;
 2691                         service_data[j++].use_count = service_ptr->
 2692                                                         service_use_count;
 2693                 }
 2694         }
 2695 
 2696         read_unlock_bh(&arm_state->susp_res_lock);
 2697 
 2698         vchiq_log_warning(vchiq_susp_log_level,
 2699                 "-- Videcore suspend state: %s --",
 2700                 suspend_state_names[vc_suspend_state + VC_SUSPEND_NUM_OFFSET]);
 2701         vchiq_log_warning(vchiq_susp_log_level,
 2702                 "-- Videcore resume state: %s --",
 2703                 resume_state_names[vc_resume_state + VC_RESUME_NUM_OFFSET]);
 2704 
 2705         if (only_nonzero)
 2706                 vchiq_log_warning(vchiq_susp_log_level, "Too many active "
 2707                         "services (%d).  Only dumping up to first %d services "
 2708                         "with non-zero use-count", active_services,
 2709                         local_max_services);
 2710 
 2711         for (i = 0; i < j; i++) {
 2712                 vchiq_log_warning(vchiq_susp_log_level,
 2713                         "----- %c%c%c%c:%d service count %d %s",
 2714                         VCHIQ_FOURCC_AS_4CHARS(service_data[i].fourcc),
 2715                         service_data[i].clientid,
 2716                         service_data[i].use_count,
 2717                         service_data[i].use_count ? nz : "");
 2718         }
 2719         vchiq_log_warning(vchiq_susp_log_level,
 2720                 "----- VCHIQ use count count %d", peer_count);
 2721         vchiq_log_warning(vchiq_susp_log_level,
 2722                 "--- Overall vchiq instance use count %d", vc_use_count);
 2723 
 2724         vchiq_dump_platform_use_state(state);
 2725 }
 2726 
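      /*
       * Sanity check made before using a service: succeeds only if the
       * caller currently holds a use count on it.  On failure the
       * offending counts are logged and a full use-state dump is emitted
       * to aid debugging.
       */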
 2727 VCHIQ_STATUS_T
 2728 vchiq_check_service(VCHIQ_SERVICE_T *service)
 2729 {
 2730         VCHIQ_ARM_STATE_T *arm_state;
 2731         VCHIQ_STATUS_T ret = VCHIQ_ERROR;
 2732 
 2733         if (!service || !service->state)
 2734                 goto out;
 2735 
 2736         vchiq_log_trace(vchiq_susp_log_level, "%s", __func__);
 2737 
 2738         arm_state = vchiq_platform_get_arm_state(service->state);
 2739 
 2740         read_lock_bh(&arm_state->susp_res_lock);
 2741         if (service->service_use_count)
 2742                 ret = VCHIQ_SUCCESS;
 2743         read_unlock_bh(&arm_state->susp_res_lock);
 2744 
 2745         if (ret == VCHIQ_ERROR) {
 2746                 vchiq_log_error(vchiq_susp_log_level,
 2747                         "%s ERROR - %c%c%c%c:%8x service count %d, "
 2748                         "state count %d, videocore suspend state %s", __func__,
 2749                         VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
 2750                         service->client_id, service->service_use_count,
 2751                         arm_state->videocore_use_count,
 2752                         suspend_state_names[arm_state->vc_suspend_state +
 2753                                                 VC_SUSPEND_NUM_OFFSET]);
 2754                 vchiq_dump_service_use_state(service->state);
 2755         }
 2756 out:
 2757         return ret;
 2758 }
 2759 
 2760 /* stub functions */
 2761 void vchiq_on_remote_use_active(VCHIQ_STATE_T *state)
 2762 {
 2763         (void)state;
 2764 }
 2765 
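      /*
       * Connection-state hook: on the first transition to CONNECTED a
       * keepalive thread ("VCHIQka-<id>") running
       * vchiq_keepalive_thread_func is created and started.  Later
       * connects skip this, guarded by first_connect.
       */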
 2766 void vchiq_platform_conn_state_changed(VCHIQ_STATE_T *state,
 2767         VCHIQ_CONNSTATE_T oldstate, VCHIQ_CONNSTATE_T newstate)
 2768 {
 2769         VCHIQ_ARM_STATE_T *arm_state = vchiq_platform_get_arm_state(state);
 2770         vchiq_log_info(vchiq_susp_log_level, "%d: %s->%s", state->id,
 2771                 get_conn_state_name(oldstate), get_conn_state_name(newstate));
 2772         if (state->conn_state == VCHIQ_CONNSTATE_CONNECTED) {
 2773                 write_lock_bh(&arm_state->susp_res_lock);
 2774                 if (!arm_state->first_connect) {
 2775                         char threadname[10];
 2776                         arm_state->first_connect = 1;
 2777                         write_unlock_bh(&arm_state->susp_res_lock);
 2778                         snprintf(threadname, sizeof(threadname), "VCHIQka-%d",
 2779                                 state->id);
 2780                         arm_state->ka_thread = vchiq_thread_create(
 2781                                 &vchiq_keepalive_thread_func,
 2782                                 (void *)state,
 2783                                 threadname);
 2784                         if (arm_state->ka_thread == NULL) {
 2785                                 vchiq_log_error(vchiq_susp_log_level,
 2786                                         "vchiq: FATAL: couldn't create thread %s",
 2787                                         threadname);
 2788                         } else {
 2789                                 wake_up_process(arm_state->ka_thread);
 2790                         }
 2791                 } else
 2792                         write_unlock_bh(&arm_state->susp_res_lock);
 2793         }
 2794 }
 2795 
 2796 /****************************************************************************
 2797 *
 2798 *   vchiq_init - called when the module is loaded.
 2799 *
 2800 ***************************************************************************/
 2801 
 2802 int __init vchiq_init(void);
 2803 int __init
 2804 vchiq_init(void)
 2805 {
 2806         int err;
 2807 
 2808 #ifdef notyet
 2809         /* create proc entries */
 2810         err = vchiq_proc_init();
 2811         if (err != 0)
 2812                 goto failed_proc_init;
 2813 #endif
 2814 
 2815         vchiq_cdev = make_dev(&vchiq_cdevsw, 0,
 2816             UID_ROOT, GID_WHEEL, 0600, "vchiq");
 2817         if (!vchiq_cdev) {
 2818                 printf("Failed to create /dev/vchiq");
 2819                 return (-ENXIO);
 2820         }
 2821 
 2822         spin_lock_init(&msg_queue_spinlock);
 2823 
 2824         err = vchiq_platform_init(&g_state);
 2825         if (err != 0)
 2826                 goto failed_platform_init;
 2827 
 2828         vchiq_log_info(vchiq_arm_log_level,
 2829                 "vchiq: initialised - version %d (min %d)",
 2830                 VCHIQ_VERSION, VCHIQ_VERSION_MIN);
 2831 
 2832         return 0;
 2833 
 2834 failed_platform_init:
 2835         if (vchiq_cdev) {
 2836                 destroy_dev(vchiq_cdev);
 2837                 vchiq_cdev = NULL;
 2838         }
 2839         vchiq_log_warning(vchiq_arm_log_level, "could not load vchiq");
 2840         return err;
 2841 }
 2842 
 2843 #ifdef notyet
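      /*
       * Linux procfs plumbing, retained for reference but compiled out
       * ("notyet") in this port: it would expose a per-process use_count
       * entry under the vchiq proc directory.
       */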
 2844 static int vchiq_instance_get_use_count(VCHIQ_INSTANCE_T instance)
 2845 {
 2846         VCHIQ_SERVICE_T *service;
 2847         int use_count = 0, i;
 2848         i = 0;
 2849         while ((service = next_service_by_instance(instance->state,
 2850                 instance, &i)) != NULL) {
 2851                 use_count += service->service_use_count;
 2852                 unlock_service(service);
 2853         }
 2854         return use_count;
 2855 }
 2856 
 2857 /* read the per-process use-count */
 2858 static int proc_read_use_count(char *page, char **start,
 2859                                off_t off, int count,
 2860                                int *eof, void *data)
 2861 {
 2862         VCHIQ_INSTANCE_T instance = data;
 2863         int len, use_count;
 2864 
 2865         use_count = vchiq_instance_get_use_count(instance);
 2866         len = snprintf(page+off, count, "%d\n", use_count);
 2867 
 2868         return len;
 2869 }
 2870 
 2871 /* add an instance (process) to the proc entries */
 2872 static int vchiq_proc_add_instance(VCHIQ_INSTANCE_T instance)
 2873 {
 2874         char pidstr[32];
 2875         struct proc_dir_entry *top, *use_count;
 2876         struct proc_dir_entry *clients = vchiq_clients_top();
 2877         int pid = instance->pid;
 2878 
 2879         snprintf(pidstr, sizeof(pidstr), "%d", pid);
 2880         top = proc_mkdir(pidstr, clients);
 2881         if (!top)
 2882                 goto fail_top;
 2883 
 2884         use_count = create_proc_read_entry("use_count",
 2885                                            0444, top,
 2886                                            proc_read_use_count,
 2887                                            instance);
 2888         if (!use_count)
 2889                 goto fail_use_count;
 2890 
 2891         instance->proc_entry = top;
 2892 
 2893         return 0;
 2894 
 2895 fail_use_count:
 2896         remove_proc_entry(top->name, clients);
 2897 fail_top:
 2898         return -ENOMEM;
 2899 }
 2900 
 2901 static void vchiq_proc_remove_instance(VCHIQ_INSTANCE_T instance)
 2902 {
 2903         struct proc_dir_entry *clients = vchiq_clients_top();
 2904         remove_proc_entry("use_count", instance->proc_entry);
 2905         remove_proc_entry(instance->proc_entry->name, clients);
 2906 }
 2907 
 2908 #endif
 2909 
 2910 /****************************************************************************
 2911 *
 2912 *   vchiq_exit - called when the module is unloaded.
 2913 *
 2914 ***************************************************************************/
 2915 
 2916 void vchiq_exit(void);
 2917 void
 2918 vchiq_exit(void)
 2919 {
 2920 
 2921         vchiq_platform_exit(&g_state);
 2922         if (vchiq_cdev) {
 2923                 destroy_dev(vchiq_cdev);
 2924                 vchiq_cdev = NULL;
 2925         }
 2926 }
