FreeBSD/Linux Kernel Cross Reference
sys/dev/ipmi/ipmi.c

    1 /*-
    2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
    3  *
    4  * Copyright (c) 2006 IronPort Systems Inc. <ambrisko@ironport.com>
    5  * All rights reserved.
    6  *
    7  * Redistribution and use in source and binary forms, with or without
    8  * modification, are permitted provided that the following conditions
    9  * are met:
   10  * 1. Redistributions of source code must retain the above copyright
   11  *    notice, this list of conditions and the following disclaimer.
   12  * 2. Redistributions in binary form must reproduce the above copyright
   13  *    notice, this list of conditions and the following disclaimer in the
   14  *    documentation and/or other materials provided with the distribution.
   15  *
   16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
   17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
   18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
   19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
   20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
   21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
   22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
   23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
   24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
   25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
   26  * SUCH DAMAGE.
   27  */
   28 
   29 #include <sys/cdefs.h>
   30 __FBSDID("$FreeBSD$");
   31 
   32 #include <sys/param.h>
   33 #include <sys/systm.h>
   34 #include <sys/bus.h>
   35 #include <sys/condvar.h>
   36 #include <sys/conf.h>
   37 #include <sys/eventhandler.h>
   38 #include <sys/kernel.h>
   39 #include <sys/lock.h>
   40 #include <sys/malloc.h>
   41 #include <sys/module.h>
   42 #include <sys/mutex.h>
   43 #include <sys/poll.h>
   44 #include <sys/reboot.h>
   45 #include <sys/rman.h>
   46 #include <sys/selinfo.h>
   47 #include <sys/sysctl.h>
   48 #include <sys/watchdog.h>
   49 
   50 #ifdef LOCAL_MODULE
   51 #include <ipmi.h>
   52 #include <ipmivars.h>
   53 #else
   54 #include <sys/ipmi.h>
   55 #include <dev/ipmi/ipmivars.h>
   56 #endif
   57 
   58 #ifdef IPMICTL_SEND_COMMAND_32
   59 #include <sys/abi_compat.h>
   60 #endif
   61 
   62 /*
   63  * Driver request structures are allocated on the stack via alloca() to
   64  * avoid calling malloc(), especially for the watchdog handler.
   65  * To avoid too much stack growth, a previously allocated structure can
   66  * be reused via IPMI_INIT_DRIVER_REQUEST(), but the caller should ensure
   67  * that there is adequate reply/request space in the original allocation.
   68  */
   69 #define IPMI_INIT_DRIVER_REQUEST(req, addr, cmd, reqlen, replylen)      \
   70         bzero((req), sizeof(struct ipmi_request));                      \
   71         ipmi_init_request((req), NULL, 0, (addr), (cmd), (reqlen), (replylen))
   72 
   73 #define IPMI_ALLOC_DRIVER_REQUEST(req, addr, cmd, reqlen, replylen)     \
   74         (req) = __builtin_alloca(sizeof(struct ipmi_request) +          \
   75             (reqlen) + (replylen));                                     \
   76         IPMI_INIT_DRIVER_REQUEST((req), (addr), (cmd), (reqlen),        \
   77             (replylen))
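
      /*
       * A typical in-driver caller allocates the request on its own stack
       * and submits it synchronously, e.g. (cf. ipmi_reset_watchdog() below):
       *
       *     IPMI_ALLOC_DRIVER_REQUEST(req, IPMI_ADDR(IPMI_APP_REQUEST, 0),
       *         IPMI_RESET_WDOG, 0, 0);
       *     error = ipmi_submit_driver_request(sc, req, 0);
       *
       * Requests built this way are anonymous (ir_owner == NULL) driver
       * requests; see ipmi_complete_request() for how they are completed.
       */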
   78 
   79 static d_ioctl_t ipmi_ioctl;
   80 static d_poll_t ipmi_poll;
   81 static d_open_t ipmi_open;
   82 static void ipmi_dtor(void *arg);
   83 
   84 int ipmi_attached = 0;
   85 
   86 static int on = 1;
   87 static bool wd_in_shutdown = false;
   88 static int wd_timer_actions = IPMI_SET_WD_ACTION_POWER_CYCLE;
   89 static int wd_shutdown_countdown = 0; /* sec */
   90 static int wd_startup_countdown = 0; /* sec */
   91 static int wd_pretimeout_countdown = 120; /* sec */
   92 static int cycle_wait = 10; /* sec */
   93 static int wd_init_enable = 1;
   94 
   95 static SYSCTL_NODE(_hw, OID_AUTO, ipmi, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
   96     "IPMI driver parameters");
   97 SYSCTL_INT(_hw_ipmi, OID_AUTO, on, CTLFLAG_RWTUN,
   98         &on, 0, "");
   99 SYSCTL_INT(_hw_ipmi, OID_AUTO, wd_init_enable, CTLFLAG_RWTUN,
  100         &wd_init_enable, 1, "Enable watchdog initialization");
  101 SYSCTL_INT(_hw_ipmi, OID_AUTO, wd_timer_actions, CTLFLAG_RWTUN,
  102         &wd_timer_actions, 0,
  103         "IPMI watchdog timer actions (including pre-timeout interrupt)");
  104 SYSCTL_INT(_hw_ipmi, OID_AUTO, wd_shutdown_countdown, CTLFLAG_RWTUN,
  105         &wd_shutdown_countdown, 0,
  106         "IPMI watchdog countdown for shutdown (seconds)");
  107 SYSCTL_INT(_hw_ipmi, OID_AUTO, wd_startup_countdown, CTLFLAG_RDTUN,
  108         &wd_startup_countdown, 0,
  109         "IPMI watchdog countdown initialized during startup (seconds)");
  110 SYSCTL_INT(_hw_ipmi, OID_AUTO, wd_pretimeout_countdown, CTLFLAG_RWTUN,
  111         &wd_pretimeout_countdown, 0,
  112         "IPMI watchdog pre-timeout countdown (seconds)");
  113 SYSCTL_INT(_hw_ipmi, OID_AUTO, cycle_wait, CTLFLAG_RWTUN,
  114         &cycle_wait, 0,
  115         "IPMI power cycle on reboot delay time (seconds)");
  116 
  117 static struct cdevsw ipmi_cdevsw = {
  118         .d_version =    D_VERSION,
  119         .d_open =       ipmi_open,
  120         .d_ioctl =      ipmi_ioctl,
  121         .d_poll =       ipmi_poll,
  122         .d_name =       "ipmi",
  123 };
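
      /*
       * Character device entry points.  Userland talks to /dev/ipmiN through
       * a message-based ioctl interface; an illustrative (untested) exchange
       * with the BMC over the system interface might look roughly like:
       *
       *     struct ipmi_system_interface_addr saddr = {
       *         .addr_type = IPMI_SYSTEM_INTERFACE_ADDR_TYPE,
       *         .channel = IPMI_BMC_CHANNEL,
       *     };
       *     struct ipmi_req req = {
       *         .addr = (void *)&saddr,
       *         .addr_len = sizeof(saddr),
       *         .msgid = 1,
       *         ... set req.msg.netfn, .cmd, .data and .data_len ...
       *     };
       *     ioctl(fd, IPMICTL_SEND_COMMAND, &req);
       *     (wait for POLLIN via poll(2), then, with recv.addr and
       *      recv.msg.data pointing at caller-supplied buffers)
       *     ioctl(fd, IPMICTL_RECEIVE_MSG, &recv);
       *
       * The first byte returned in recv.msg.data is the IPMI completion
       * code, followed by the response data (see ipmi_ioctl() below).
       */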
  124 
  125 static MALLOC_DEFINE(M_IPMI, "ipmi", "ipmi");
  126 
  127 static int
  128 ipmi_open(struct cdev *cdev, int flags, int fmt, struct thread *td)
  129 {
  130         struct ipmi_device *dev;
  131         struct ipmi_softc *sc;
  132         int error;
  133 
  134         if (!on)
  135                 return (ENOENT);
  136 
  137         /* Initialize the per file descriptor data. */
  138         dev = malloc(sizeof(struct ipmi_device), M_IPMI, M_WAITOK | M_ZERO);
  139         error = devfs_set_cdevpriv(dev, ipmi_dtor);
  140         if (error) {
  141                 free(dev, M_IPMI);
  142                 return (error);
  143         }
  144 
  145         sc = cdev->si_drv1;
  146         TAILQ_INIT(&dev->ipmi_completed_requests);
  147         dev->ipmi_address = IPMI_BMC_SLAVE_ADDR;
  148         dev->ipmi_lun = IPMI_BMC_SMS_LUN;
  149         dev->ipmi_softc = sc;
  150         IPMI_LOCK(sc);
  151         sc->ipmi_opened++;
  152         IPMI_UNLOCK(sc);
  153 
  154         return (0);
  155 }
  156 
  157 static int
  158 ipmi_poll(struct cdev *cdev, int poll_events, struct thread *td)
  159 {
  160         struct ipmi_device *dev;
  161         struct ipmi_softc *sc;
  162         int revents = 0;
  163 
  164         if (devfs_get_cdevpriv((void **)&dev))
  165                 return (0);
  166 
  167         sc = cdev->si_drv1;
  168         IPMI_LOCK(sc);
  169         if (poll_events & (POLLIN | POLLRDNORM)) {
  170                 if (!TAILQ_EMPTY(&dev->ipmi_completed_requests))
  171                     revents |= poll_events & (POLLIN | POLLRDNORM);
  172                 if (dev->ipmi_requests == 0)
  173                     revents |= POLLERR;
  174         }
  175 
  176         if (revents == 0) {
  177                 if (poll_events & (POLLIN | POLLRDNORM))
  178                         selrecord(td, &dev->ipmi_select);
  179         }
  180         IPMI_UNLOCK(sc);
  181 
  182         return (revents);
  183 }
  184 
  185 static void
  186 ipmi_purge_completed_requests(struct ipmi_device *dev)
  187 {
  188         struct ipmi_request *req;
  189 
  190         while (!TAILQ_EMPTY(&dev->ipmi_completed_requests)) {
  191                 req = TAILQ_FIRST(&dev->ipmi_completed_requests);
  192                 TAILQ_REMOVE(&dev->ipmi_completed_requests, req, ir_link);
  193                 dev->ipmi_requests--;
  194                 ipmi_free_request(req);
  195         }
  196 }
  197 
  198 static void
  199 ipmi_dtor(void *arg)
  200 {
  201         struct ipmi_request *req, *nreq;
  202         struct ipmi_device *dev;
  203         struct ipmi_softc *sc;
  204 
  205         dev = arg;
  206         sc = dev->ipmi_softc;
  207 
  208         IPMI_LOCK(sc);
  209         if (dev->ipmi_requests) {
  210                 /* Throw away any pending requests for this device. */
  211                 TAILQ_FOREACH_SAFE(req, &sc->ipmi_pending_requests_highpri, ir_link,
  212                     nreq) {
  213                         if (req->ir_owner == dev) {
  214                                 TAILQ_REMOVE(&sc->ipmi_pending_requests_highpri, req,
  215                                     ir_link);
  216                                 dev->ipmi_requests--;
  217                                 ipmi_free_request(req);
  218                         }
  219                 }
  220                 TAILQ_FOREACH_SAFE(req, &sc->ipmi_pending_requests, ir_link,
  221                     nreq) {
  222                         if (req->ir_owner == dev) {
  223                                 TAILQ_REMOVE(&sc->ipmi_pending_requests, req,
  224                                     ir_link);
  225                                 dev->ipmi_requests--;
  226                                 ipmi_free_request(req);
  227                         }
  228                 }
  229 
  230                 /* Throw away any pending completed requests for this device. */
  231                 ipmi_purge_completed_requests(dev);
  232 
  233                 /*
  234                  * If we still have outstanding requests, they must be stuck
  235                  * in an interface driver, so wait for those to drain.
  236                  */
  237                 dev->ipmi_closing = 1;
  238                 while (dev->ipmi_requests > 0) {
  239                         msleep(&dev->ipmi_requests, &sc->ipmi_requests_lock,
  240                             PWAIT, "ipmidrain", 0);
  241                         ipmi_purge_completed_requests(dev);
  242                 }
  243         }
  244         sc->ipmi_opened--;
  245         IPMI_UNLOCK(sc);
  246 
  247         /* Cleanup. */
  248         free(dev, M_IPMI);
  249 }
  250 
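      /*
       * IPMB messages are protected by simple 8-bit two's complement
       * checksums: the byte returned here is chosen so that the covered
       * bytes plus the checksum sum to zero modulo 256.
       */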
  251 static u_char
  252 ipmi_ipmb_checksum(u_char *data, int len)
  253 {
  254         u_char sum = 0;
  255 
  256         for (; len; len--)
  257                 sum += *data++;
  258         return (-sum);
  259 }
  260 
  261 static int
  262 ipmi_ioctl(struct cdev *cdev, u_long cmd, caddr_t data,
  263     int flags, struct thread *td)
  264 {
  265         struct ipmi_softc *sc;
  266         struct ipmi_device *dev;
  267         struct ipmi_request *kreq;
  268         struct ipmi_req *req = (struct ipmi_req *)data;
  269         struct ipmi_recv *recv = (struct ipmi_recv *)data;
  270         struct ipmi_addr addr;
  271 #ifdef IPMICTL_SEND_COMMAND_32
  272         struct ipmi_req32 *req32 = (struct ipmi_req32 *)data;
  273         struct ipmi_recv32 *recv32 = (struct ipmi_recv32 *)data;
  274         union {
  275                 struct ipmi_req req;
  276                 struct ipmi_recv recv;
  277         } thunk32;
  278 #endif
  279         int error, len;
  280 
  281         error = devfs_get_cdevpriv((void **)&dev);
  282         if (error)
  283                 return (error);
  284 
  285         sc = cdev->si_drv1;
  286 
  287 #ifdef IPMICTL_SEND_COMMAND_32
  288         /* Convert 32-bit structures to native. */
  289         switch (cmd) {
  290         case IPMICTL_SEND_COMMAND_32:
  291                 req = &thunk32.req;
  292                 req->addr = PTRIN(req32->addr);
  293                 req->addr_len = req32->addr_len;
  294                 req->msgid = req32->msgid;
  295                 req->msg.netfn = req32->msg.netfn;
  296                 req->msg.cmd = req32->msg.cmd;
  297                 req->msg.data_len = req32->msg.data_len;
  298                 req->msg.data = PTRIN(req32->msg.data);
  299                 break;
  300         case IPMICTL_RECEIVE_MSG_TRUNC_32:
  301         case IPMICTL_RECEIVE_MSG_32:
  302                 recv = &thunk32.recv;
  303                 recv->addr = PTRIN(recv32->addr);
  304                 recv->addr_len = recv32->addr_len;
  305                 recv->msg.data_len = recv32->msg.data_len;
  306                 recv->msg.data = PTRIN(recv32->msg.data);
  307                 break;
  308         }
  309 #endif
  310 
  311         switch (cmd) {
  312 #ifdef IPMICTL_SEND_COMMAND_32
  313         case IPMICTL_SEND_COMMAND_32:
  314 #endif
  315         case IPMICTL_SEND_COMMAND:
  316                 error = copyin(req->addr, &addr, sizeof(addr));
  317                 if (error)
  318                         return (error);
  319 
  320                 if (addr.addr_type == IPMI_SYSTEM_INTERFACE_ADDR_TYPE) {
  321                         struct ipmi_system_interface_addr *saddr =
  322                             (struct ipmi_system_interface_addr *)&addr;
  323 
  324                         kreq = ipmi_alloc_request(dev, req->msgid,
  325                             IPMI_ADDR(req->msg.netfn, saddr->lun & 0x3),
  326                             req->msg.cmd, req->msg.data_len, IPMI_MAX_RX);
  327                         error = copyin(req->msg.data, kreq->ir_request,
  328                             req->msg.data_len);
  329                         if (error) {
  330                                 ipmi_free_request(kreq);
  331                                 return (error);
  332                         }
  333                         IPMI_LOCK(sc);
  334                         dev->ipmi_requests++;
  335                         error = sc->ipmi_enqueue_request(sc, kreq);
  336                         IPMI_UNLOCK(sc);
  337                         if (error)
  338                                 return (error);
  339                         break;
  340                 }
  341 
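                      /*
                       * Requests addressed to a device on an IPMB are bridged
                       * through the BMC: the user's message is wrapped in a
                       * SEND_MSG request built below (channel, destination
                       * address/LUN plus a checksum, then our own address,
                       * the command, the data and a trailing checksum), and a
                       * GET_MSG request is queued afterwards to pull the
                       * bridged response back out of the BMC's receive
                       * message queue.
                       */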
  342                 /* Special processing for IPMB commands */
  343                 struct ipmi_ipmb_addr *iaddr = (struct ipmi_ipmb_addr *)&addr;
  344 
  345                 IPMI_ALLOC_DRIVER_REQUEST(kreq, IPMI_ADDR(IPMI_APP_REQUEST, 0),
  346                     IPMI_SEND_MSG, req->msg.data_len + 8, IPMI_MAX_RX);
  347                 /* Construct the SEND MSG header */
  348                 kreq->ir_request[0] = iaddr->channel;
  349                 kreq->ir_request[1] = iaddr->slave_addr;
  350                 kreq->ir_request[2] = IPMI_ADDR(req->msg.netfn, iaddr->lun);
  351                 kreq->ir_request[3] =
  352                     ipmi_ipmb_checksum(&kreq->ir_request[1], 2);
  353                 kreq->ir_request[4] = dev->ipmi_address;
  354                 kreq->ir_request[5] = IPMI_ADDR(0, dev->ipmi_lun);
  355                 kreq->ir_request[6] = req->msg.cmd;
  356                 /* Copy the message data */
  357                 if (req->msg.data_len > 0) {
  358                         error = copyin(req->msg.data, &kreq->ir_request[7],
  359                             req->msg.data_len);
  360                         if (error != 0)
  361                                 return (error);
  362                 }
  363                 kreq->ir_request[req->msg.data_len + 7] =
  364                     ipmi_ipmb_checksum(&kreq->ir_request[4],
  365                     req->msg.data_len + 3);
  366                 error = ipmi_submit_driver_request(sc, kreq, MAX_TIMEOUT);
  367                 if (error != 0)
  368                         return (error);
  369 
  370                 kreq = ipmi_alloc_request(dev, req->msgid,
  371                     IPMI_ADDR(IPMI_APP_REQUEST, 0), IPMI_GET_MSG,
  372                     0, IPMI_MAX_RX);
  373                 kreq->ir_ipmb = true;
  374                 kreq->ir_ipmb_addr = IPMI_ADDR(req->msg.netfn, 0);
  375                 kreq->ir_ipmb_command = req->msg.cmd;
  376                 IPMI_LOCK(sc);
  377                 dev->ipmi_requests++;
  378                 error = sc->ipmi_enqueue_request(sc, kreq);
  379                 IPMI_UNLOCK(sc);
  380                 if (error != 0)
  381                         return (error);
  382                 break;
  383 #ifdef IPMICTL_SEND_COMMAND_32
  384         case IPMICTL_RECEIVE_MSG_TRUNC_32:
  385         case IPMICTL_RECEIVE_MSG_32:
  386 #endif
  387         case IPMICTL_RECEIVE_MSG_TRUNC:
  388         case IPMICTL_RECEIVE_MSG:
  389                 error = copyin(recv->addr, &addr, sizeof(addr));
  390                 if (error)
  391                         return (error);
  392 
  393                 IPMI_LOCK(sc);
  394                 kreq = TAILQ_FIRST(&dev->ipmi_completed_requests);
  395                 if (kreq == NULL) {
  396                         IPMI_UNLOCK(sc);
  397                         return (EAGAIN);
  398                 }
  399                 if (kreq->ir_error != 0) {
  400                         error = kreq->ir_error;
  401                         TAILQ_REMOVE(&dev->ipmi_completed_requests, kreq,
  402                             ir_link);
  403                         dev->ipmi_requests--;
  404                         IPMI_UNLOCK(sc);
  405                         ipmi_free_request(kreq);
  406                         return (error);
  407                 }
  408 
  409                 recv->recv_type = IPMI_RESPONSE_RECV_TYPE;
  410                 recv->msgid = kreq->ir_msgid;
  411                 if (kreq->ir_ipmb) {
  412                         addr.channel = IPMI_IPMB_CHANNEL;
  413                         recv->msg.netfn =
  414                             IPMI_REPLY_ADDR(kreq->ir_ipmb_addr) >> 2;
  415                         recv->msg.cmd = kreq->ir_ipmb_command;
  416                         /* Get the compcode of the response */
  417                         kreq->ir_compcode = kreq->ir_reply[6];
  418                         /* Move the reply head past the response header */
  419                         kreq->ir_reply += 7;
  420                         len = kreq->ir_replylen - 7;
  421                 } else {
  422                         addr.channel = IPMI_BMC_CHANNEL;
  423                         recv->msg.netfn = IPMI_REPLY_ADDR(kreq->ir_addr) >> 2;
  424                         recv->msg.cmd = kreq->ir_command;
  425                         len = kreq->ir_replylen + 1;
  426                 }
  427 
  428                 if (recv->msg.data_len < len &&
  429                     (cmd == IPMICTL_RECEIVE_MSG
  430 #ifdef IPMICTL_RECEIVE_MSG_32
  431                     || cmd == IPMICTL_RECEIVE_MSG_32
  432 #endif
  433                     )) {
  434                         IPMI_UNLOCK(sc);
  435                         return (EMSGSIZE);
  436                 }
  437                 TAILQ_REMOVE(&dev->ipmi_completed_requests, kreq, ir_link);
  438                 dev->ipmi_requests--;
  439                 IPMI_UNLOCK(sc);
  440                 len = min(recv->msg.data_len, len);
  441                 recv->msg.data_len = len;
  442                 error = copyout(&addr, recv->addr,sizeof(addr));
  443                 if (error == 0)
  444                         error = copyout(&kreq->ir_compcode, recv->msg.data, 1);
  445                 if (error == 0)
  446                         error = copyout(kreq->ir_reply, recv->msg.data + 1,
  447                             len - 1);
  448                 ipmi_free_request(kreq);
  449                 if (error)
  450                         return (error);
  451                 break;
  452         case IPMICTL_SET_MY_ADDRESS_CMD:
  453                 IPMI_LOCK(sc);
  454                 dev->ipmi_address = *(int*)data;
  455                 IPMI_UNLOCK(sc);
  456                 break;
  457         case IPMICTL_GET_MY_ADDRESS_CMD:
  458                 IPMI_LOCK(sc);
  459                 *(int*)data = dev->ipmi_address;
  460                 IPMI_UNLOCK(sc);
  461                 break;
  462         case IPMICTL_SET_MY_LUN_CMD:
  463                 IPMI_LOCK(sc);
  464                 dev->ipmi_lun = *(int*)data & 0x3;
  465                 IPMI_UNLOCK(sc);
  466                 break;
  467         case IPMICTL_GET_MY_LUN_CMD:
  468                 IPMI_LOCK(sc);
  469                 *(int*)data = dev->ipmi_lun;
  470                 IPMI_UNLOCK(sc);
  471                 break;
  472         case IPMICTL_SET_GETS_EVENTS_CMD:
  473                 /*
  474                 device_printf(sc->ipmi_dev,
  475                     "IPMICTL_SET_GETS_EVENTS_CMD NA\n");
  476                 */
  477                 break;
  478         case IPMICTL_REGISTER_FOR_CMD:
  479         case IPMICTL_UNREGISTER_FOR_CMD:
  480                 return (EOPNOTSUPP);
  481         default:
  482                 device_printf(sc->ipmi_dev, "Unknown IOCTL %lX\n", cmd);
  483                 return (ENOIOCTL);
  484         }
  485 
  486 #ifdef IPMICTL_SEND_COMMAND_32
  487         /* Update changed fields in 32-bit structures. */
  488         switch (cmd) {
  489         case IPMICTL_RECEIVE_MSG_TRUNC_32:
  490         case IPMICTL_RECEIVE_MSG_32:
  491                 recv32->recv_type = recv->recv_type;
  492                 recv32->msgid = recv->msgid;
  493                 recv32->msg.netfn = recv->msg.netfn;
  494                 recv32->msg.cmd = recv->msg.cmd;
  495                 recv32->msg.data_len = recv->msg.data_len;
  496                 break;
  497         }
  498 #endif
  499         return (0);
  500 }
  501 
  502 /*
  503  * Request management.
  504  */
  505 
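      /*
       * A request carries its request and reply buffers in the same
       * allocation, laid out immediately after the structure itself:
       *
       *     [ struct ipmi_request | request buffer | reply buffer ]
       *
       * which is why ipmi_init_request() points ir_request and ir_reply
       * just past &req[1].
       */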
  506 __inline void
  507 ipmi_init_request(struct ipmi_request *req, struct ipmi_device *dev, long msgid,
  508     uint8_t addr, uint8_t command, size_t requestlen, size_t replylen)
  509 {
  510 
  511         req->ir_owner = dev;
  512         req->ir_msgid = msgid;
  513         req->ir_addr = addr;
  514         req->ir_command = command;
  515         if (requestlen) {
  516                 req->ir_request = (char *)&req[1];
  517                 req->ir_requestlen = requestlen;
  518         }
  519         if (replylen) {
  520                 req->ir_reply = (char *)&req[1] + requestlen;
  521                 req->ir_replybuflen = replylen;
  522         }
  523 }
  524 
  525 /* Allocate a new request with request and reply buffers. */
  526 struct ipmi_request *
  527 ipmi_alloc_request(struct ipmi_device *dev, long msgid, uint8_t addr,
  528     uint8_t command, size_t requestlen, size_t replylen)
  529 {
  530         struct ipmi_request *req;
  531 
  532         req = malloc(sizeof(struct ipmi_request) + requestlen + replylen,
  533             M_IPMI, M_WAITOK | M_ZERO);
  534         ipmi_init_request(req, dev, msgid, addr, command, requestlen, replylen);
  535         return (req);
  536 }
  537 
  538 /* Free a request no longer in use. */
  539 void
  540 ipmi_free_request(struct ipmi_request *req)
  541 {
  542 
  543         free(req, M_IPMI);
  544 }
  545 
  546 /* Store a processed request on the appropriate completion queue. */
  547 void
  548 ipmi_complete_request(struct ipmi_softc *sc, struct ipmi_request *req)
  549 {
  550         struct ipmi_device *dev;
  551 
  552         IPMI_LOCK_ASSERT(sc);
  553 
  554         /*
  555          * Anonymous requests (from inside the driver) always have a
  556          * waiter that we awaken.
  557          */
  558         if (req->ir_owner == NULL)
  559                 wakeup(req);
  560         else {
  561                 dev = req->ir_owner;
  562                 TAILQ_INSERT_TAIL(&dev->ipmi_completed_requests, req, ir_link);
  563                 selwakeup(&dev->ipmi_select);
  564                 if (dev->ipmi_closing)
  565                         wakeup(&dev->ipmi_requests);
  566         }
  567 }
  568 
  569 /* Perform an internal driver request. */
  570 int
  571 ipmi_submit_driver_request(struct ipmi_softc *sc, struct ipmi_request *req,
  572     int timo)
  573 {
  574 
  575         return (sc->ipmi_driver_request(sc, req, timo));
  576 }
  577 
  578 /*
  579  * Helper routine for polled system interfaces that use
  580  * ipmi_polled_enqueue_request() to queue requests.  This routine
  581  * waits until there is a pending request and then returns the first
  582  * request.  If the driver is shutting down, it returns NULL.
  583  */
  584 struct ipmi_request *
  585 ipmi_dequeue_request(struct ipmi_softc *sc)
  586 {
  587         struct ipmi_request *req;
  588 
  589         IPMI_LOCK_ASSERT(sc);
  590 
  591         while (!sc->ipmi_detaching && TAILQ_EMPTY(&sc->ipmi_pending_requests) &&
  592             TAILQ_EMPTY(&sc->ipmi_pending_requests_highpri))
  593                 cv_wait(&sc->ipmi_request_added, &sc->ipmi_requests_lock);
  594         if (sc->ipmi_detaching)
  595                 return (NULL);
  596 
  597         req = TAILQ_FIRST(&sc->ipmi_pending_requests_highpri);
  598         if (req != NULL)
  599                 TAILQ_REMOVE(&sc->ipmi_pending_requests_highpri, req, ir_link);
  600         else {
  601                 req = TAILQ_FIRST(&sc->ipmi_pending_requests);
  602                 TAILQ_REMOVE(&sc->ipmi_pending_requests, req, ir_link);
  603         }
  604         return (req);
  605 }
  606 
  607 /* Default implementation of ipmi_enqueue_request() for polled interfaces. */
  608 int
  609 ipmi_polled_enqueue_request(struct ipmi_softc *sc, struct ipmi_request *req)
  610 {
  611 
  612         IPMI_LOCK_ASSERT(sc);
  613 
  614         TAILQ_INSERT_TAIL(&sc->ipmi_pending_requests, req, ir_link);
  615         cv_signal(&sc->ipmi_request_added);
  616         return (0);
  617 }
  618 
  619 int
  620 ipmi_polled_enqueue_request_highpri(struct ipmi_softc *sc, struct ipmi_request *req)
  621 {
  622 
  623         IPMI_LOCK_ASSERT(sc);
  624 
  625         TAILQ_INSERT_TAIL(&sc->ipmi_pending_requests_highpri, req, ir_link);
  626         cv_signal(&sc->ipmi_request_added);
  627         return (0);
  628 }
  629 
  630 /*
  631  * Watchdog event handler.
  632  */
  633 
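      /*
       * Pat the watchdog by issuing a Reset Watchdog Timer command.  A
       * completion code of 0x80 means the timer has never been initialized
       * (per the IPMI spec, an attempt to reset an un-initialized watchdog)
       * and is mapped to ENOENT here.
       */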
  634 static int
  635 ipmi_reset_watchdog(struct ipmi_softc *sc)
  636 {
  637         struct ipmi_request *req;
  638         int error;
  639 
  640         IPMI_ALLOC_DRIVER_REQUEST(req, IPMI_ADDR(IPMI_APP_REQUEST, 0),
  641             IPMI_RESET_WDOG, 0, 0);
  642         error = ipmi_submit_driver_request(sc, req, 0);
  643         if (error) {
  644                 device_printf(sc->ipmi_dev, "Failed to reset watchdog\n");
  645         } else if (req->ir_compcode == 0x80) {
  646                 error = ENOENT;
  647         } else if (req->ir_compcode != 0) {
  648                 device_printf(sc->ipmi_dev, "Watchdog reset returned 0x%x\n",
  649                     req->ir_compcode);
  650                 error = EINVAL;
  651         }
  652         return (error);
  653 }
  654 
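      /*
       * Program the BMC watchdog via the Set Watchdog Timer command.  Per
       * the IPMI v2.0 spec, the request bytes are: timer use, timer actions,
       * pre-timeout interval in seconds, timer-use expiration flags to
       * clear, and the initial countdown value in 100 ms units (LSB first),
       * which is why timeouts above 0xffff / 10 seconds are rejected.
       */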
  655 static int
  656 ipmi_set_watchdog(struct ipmi_softc *sc, unsigned int sec)
  657 {
  658         struct ipmi_request *req;
  659         int error;
  660 
  661         if (sec > 0xffff / 10)
  662                 return (EINVAL);
  663 
  664         IPMI_ALLOC_DRIVER_REQUEST(req, IPMI_ADDR(IPMI_APP_REQUEST, 0),
  665             IPMI_SET_WDOG, 6, 0);
  666         if (sec) {
  667                 req->ir_request[0] = IPMI_SET_WD_TIMER_DONT_STOP
  668                     | IPMI_SET_WD_TIMER_SMS_OS;
  669                 req->ir_request[1] = (wd_timer_actions & 0xff);
  670                 req->ir_request[2] = min(0xff,
  671                     min(wd_pretimeout_countdown, (sec + 2) / 4));
  672                 req->ir_request[3] = 0; /* Timer use expiration flags clear */
  673                 req->ir_request[4] = (sec * 10) & 0xff;
  674                 req->ir_request[5] = (sec * 10) >> 8;
  675         } else {
  676                 req->ir_request[0] = IPMI_SET_WD_TIMER_SMS_OS;
  677                 req->ir_request[1] = 0;
  678                 req->ir_request[2] = 0;
  679                 req->ir_request[3] = 0; /* Timer use expiration flags clear */
  680                 req->ir_request[4] = 0;
  681                 req->ir_request[5] = 0;
  682         }
  683         error = ipmi_submit_driver_request(sc, req, 0);
  684         if (error) {
  685                 device_printf(sc->ipmi_dev, "Failed to set watchdog\n");
  686         } else if (req->ir_compcode != 0) {
  687                 device_printf(sc->ipmi_dev, "Watchdog set returned 0x%x\n",
  688                     req->ir_compcode);
  689                 error = EINVAL;
  690         }
  691         return (error);
  692 }
  693 
  694 static void
  695 ipmi_wd_event(void *arg, unsigned int cmd, int *error)
  696 {
  697         struct ipmi_softc *sc = arg;
  698         unsigned int timeout;
  699         int e;
  700 
  701         /* Ignore requests while disabled. */
  702         if (!on)
  703                 return;
  704 
  705         /*
  706          * To prevent infinite hangs, we don't let anyone pat or change
  707          * the watchdog when we're shutting down. (See ipmi_shutdown_event().)
  708          * However, we do want to keep patting the watchdog while we are doing
  709          * a coredump.
  710          */
  711         if (wd_in_shutdown) {
  712                 if (dumping && sc->ipmi_watchdog_active)
  713                         ipmi_reset_watchdog(sc);
  714                 return;
  715         }
  716 
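              /*
               * watchdog(9) passes the requested timeout in the WD_INTERVAL
               * bits of cmd as a power of two in nanoseconds, so convert
               * 2^cmd ns to whole seconds for the BMC, using at least one
               * second for any non-zero interval.
               */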
  717         cmd &= WD_INTERVAL;
  718         if (cmd > 0 && cmd <= 63) {
  719                 timeout = ((uint64_t)1 << cmd) / 1000000000;
  720                 if (timeout == 0)
  721                         timeout = 1;
  722                 if (timeout != sc->ipmi_watchdog_active ||
  723                     wd_timer_actions != sc->ipmi_watchdog_actions ||
  724                     wd_pretimeout_countdown != sc->ipmi_watchdog_pretimeout) {
  725                         e = ipmi_set_watchdog(sc, timeout);
  726                         if (e == 0) {
  727                                 sc->ipmi_watchdog_active = timeout;
  728                                 sc->ipmi_watchdog_actions = wd_timer_actions;
  729                                 sc->ipmi_watchdog_pretimeout = wd_pretimeout_countdown;
  730                         } else {
  731                                 (void)ipmi_set_watchdog(sc, 0);
  732                                 sc->ipmi_watchdog_active = 0;
  733                                 sc->ipmi_watchdog_actions = 0;
  734                                 sc->ipmi_watchdog_pretimeout = 0;
  735                         }
  736                 }
  737                 if (sc->ipmi_watchdog_active != 0) {
  738                         e = ipmi_reset_watchdog(sc);
  739                         if (e == 0) {
  740                                 *error = 0;
  741                         } else {
  742                                 (void)ipmi_set_watchdog(sc, 0);
  743                                 sc->ipmi_watchdog_active = 0;
  744                                 sc->ipmi_watchdog_actions = 0;
  745                                 sc->ipmi_watchdog_pretimeout = 0;
  746                         }
  747                 }
  748         } else if (atomic_readandclear_int(&sc->ipmi_watchdog_active) != 0) {
  749                 sc->ipmi_watchdog_actions = 0;
  750                 sc->ipmi_watchdog_pretimeout = 0;
  751 
  752                 e = ipmi_set_watchdog(sc, 0);
  753                 if (e != 0 && cmd == 0)
  754                         *error = EOPNOTSUPP;
  755         }
  756 }
  757 
  758 static void
  759 ipmi_shutdown_event(void *arg, unsigned int cmd, int *error)
  760 {
  761         struct ipmi_softc *sc = arg;
  762 
  763         /* Ignore event if disabled. */
  764         if (!on)
  765                 return;
  766 
  767         /*
  768          * A positive wd_shutdown_countdown value re-arms the watchdog;
  769          * zero disables the watchdog; a negative value leaves the
  770          * existing state alone.
  771          *
  772          * Revert to using a power cycle to ensure that the watchdog will
  773          * do something useful here.  Having the watchdog send an NMI
  774          * instead is useless during shutdown, and might be ignored if an
  775          * NMI already triggered.
  776          */
  777 
  778         wd_in_shutdown = true;
  779         if (wd_shutdown_countdown == 0) {
  780                 /* disable watchdog */
  781                 ipmi_set_watchdog(sc, 0);
  782                 sc->ipmi_watchdog_active = 0;
  783         } else if (wd_shutdown_countdown > 0) {
  784                 /* set desired action and time, and reset watchdog */
  785                 wd_timer_actions = IPMI_SET_WD_ACTION_POWER_CYCLE;
  786                 ipmi_set_watchdog(sc, wd_shutdown_countdown);
  787                 sc->ipmi_watchdog_active = wd_shutdown_countdown;
  788                 ipmi_reset_watchdog(sc);
  789         }
  790 }
  791 
  792 static void
  793 ipmi_power_cycle(void *arg, int howto)
  794 {
  795         struct ipmi_softc *sc = arg;
  796         struct ipmi_request *req;
  797 
  798         /*
  799          * Ignore everything except power cycling requests
  800          */
  801         if ((howto & RB_POWERCYCLE) == 0)
  802                 return;
  803 
  804         device_printf(sc->ipmi_dev, "Power cycling using IPMI\n");
  805 
  806         /*
  807          * Send a CHASSIS_CONTROL command to the CHASSIS device, subcommand 2
  808          * as described in IPMI v2.0 spec section 28.3.
  809          */
  810         IPMI_ALLOC_DRIVER_REQUEST(req, IPMI_ADDR(IPMI_CHASSIS_REQUEST, 0),
  811             IPMI_CHASSIS_CONTROL, 1, 0);
  812         req->ir_request[0] = IPMI_CC_POWER_CYCLE;
  813 
  814         ipmi_submit_driver_request(sc, req, MAX_TIMEOUT);
  815 
  816         if (req->ir_error != 0 || req->ir_compcode != 0) {
  817                 device_printf(sc->ipmi_dev, "Power cycling via IPMI failed code %#x %#x\n",
  818                     req->ir_error, req->ir_compcode);
  819                 return;
  820         }
  821 
  822         /*
  823          * BMCs are notoriously slow, so give the BMC cycle_wait seconds for the
  824          * power-down leg of the power cycle.  If that fails, fall back to the next
  825          * handler in the shutdown_final chain and/or the platform failsafe.
  826          */
  827         DELAY(cycle_wait * 1000 * 1000);
  828         device_printf(sc->ipmi_dev, "Power cycling via IPMI timed out\n");
  829 }
  830 
  831 static void
  832 ipmi_startup(void *arg)
  833 {
  834         struct ipmi_softc *sc = arg;
  835         struct ipmi_request *req;
  836         device_t dev;
  837         int error, i;
  838 
  839         config_intrhook_disestablish(&sc->ipmi_ich);
  840         dev = sc->ipmi_dev;
  841 
  842         /* Initialize interface-independent state. */
  843         mtx_init(&sc->ipmi_requests_lock, "ipmi requests", NULL, MTX_DEF);
  844         mtx_init(&sc->ipmi_io_lock, "ipmi io", NULL, MTX_DEF);
  845         cv_init(&sc->ipmi_request_added, "ipmireq");
  846         TAILQ_INIT(&sc->ipmi_pending_requests_highpri);
  847         TAILQ_INIT(&sc->ipmi_pending_requests);
  848 
  849         /* Initialize interface-dependent state. */
  850         error = sc->ipmi_startup(sc);
  851         if (error) {
  852                 device_printf(dev, "Failed to initialize interface: %d\n",
  853                     error);
  854                 return;
  855         }
  856 
  857         /* Send a GET_DEVICE_ID request. */
  858         IPMI_ALLOC_DRIVER_REQUEST(req, IPMI_ADDR(IPMI_APP_REQUEST, 0),
  859             IPMI_GET_DEVICE_ID, 0, 15);
  860 
  861         error = ipmi_submit_driver_request(sc, req, MAX_TIMEOUT);
  862         if (error == EWOULDBLOCK) {
  863                 device_printf(dev, "Timed out waiting for GET_DEVICE_ID\n");
  864                 return;
  865         } else if (error) {
  866                 device_printf(dev, "Failed GET_DEVICE_ID: %d\n", error);
  867                 return;
  868         } else if (req->ir_compcode != 0) {
  869                 device_printf(dev,
  870                     "Bad completion code for GET_DEVICE_ID: %d\n",
  871                     req->ir_compcode);
  872                 return;
  873         } else if (req->ir_replylen < 5) {
  874                 device_printf(dev, "Short reply for GET_DEVICE_ID: %d\n",
  875                     req->ir_replylen);
  876                 return;
  877         }
  878 
  879         device_printf(dev, "IPMI device rev. %d, firmware rev. %d.%d%d, "
  880             "version %d.%d, device support mask %#x\n",
  881             req->ir_reply[1] & 0x0f,
  882             req->ir_reply[2] & 0x7f, req->ir_reply[3] >> 4, req->ir_reply[3] & 0x0f,
  883             req->ir_reply[4] & 0x0f, req->ir_reply[4] >> 4, req->ir_reply[5]);
  884 
  885         sc->ipmi_dev_support = req->ir_reply[5];
  886 
  887         IPMI_INIT_DRIVER_REQUEST(req, IPMI_ADDR(IPMI_APP_REQUEST, 0),
  888             IPMI_CLEAR_FLAGS, 1, 0);
  889 
  890         ipmi_submit_driver_request(sc, req, 0);
  891 
  892         /* XXX: Magic numbers */
  893         if (req->ir_compcode == 0xc0) {
  894                 device_printf(dev, "Clear flags is busy\n");
  895         }
  896         if (req->ir_compcode == 0xc1) {
  897                 device_printf(dev, "Clear flags illegal\n");
  898         }
  899 
  900         for (i = 0; i < 8; i++) {
  901                 IPMI_INIT_DRIVER_REQUEST(req, IPMI_ADDR(IPMI_APP_REQUEST, 0),
  902                     IPMI_GET_CHANNEL_INFO, 1, 0);
  903                 req->ir_request[0] = i;
  904 
  905                 error = ipmi_submit_driver_request(sc, req, 0);
  906 
  907                 if (error != 0 || req->ir_compcode != 0)
  908                         break;
  909         }
  910         device_printf(dev, "Number of channels %d\n", i);
  911 
  912         /*
  913          * Probe for watchdog, but only for backends which support
  914          * polled driver requests.
  915          */
  916         if (wd_init_enable && sc->ipmi_driver_requests_polled) {
  917                 IPMI_INIT_DRIVER_REQUEST(req, IPMI_ADDR(IPMI_APP_REQUEST, 0),
  918                     IPMI_GET_WDOG, 0, 0);
  919 
  920                 error = ipmi_submit_driver_request(sc, req, 0);
  921 
  922                 if (error == 0 && req->ir_compcode == 0x00) {
  923                         device_printf(dev, "Attached watchdog\n");
  924                         /* register the watchdog event handler */
  925                         sc->ipmi_watchdog_tag = EVENTHANDLER_REGISTER(
  926                                 watchdog_list, ipmi_wd_event, sc, 0);
  927                         sc->ipmi_shutdown_tag = EVENTHANDLER_REGISTER(
  928                                 shutdown_pre_sync, ipmi_shutdown_event,
  929                                 sc, 0);
  930                 }
  931         }
  932 
  933         sc->ipmi_cdev = make_dev(&ipmi_cdevsw, device_get_unit(dev),
  934             UID_ROOT, GID_OPERATOR, 0660, "ipmi%d", device_get_unit(dev));
  935         if (sc->ipmi_cdev == NULL) {
  936                 device_printf(dev, "Failed to create cdev\n");
  937                 return;
  938         }
  939         sc->ipmi_cdev->si_drv1 = sc;
  940 
  941         /*
  942          * Set the initial watchdog state: if requested, arm an initial
  943          * watchdog at startup; or, if the driver is disabled, clear any
  944          * existing watchdog.
  945          */
  946         if (on && wd_startup_countdown > 0) {
  947                 if (ipmi_set_watchdog(sc, wd_startup_countdown) == 0 &&
  948                     ipmi_reset_watchdog(sc) == 0) {
  949                         sc->ipmi_watchdog_active = wd_startup_countdown;
  950                         sc->ipmi_watchdog_actions = wd_timer_actions;
  951                         sc->ipmi_watchdog_pretimeout = wd_pretimeout_countdown;
  952                 } else
  953                         (void)ipmi_set_watchdog(sc, 0);
  954                 ipmi_reset_watchdog(sc);
  955         } else if (!on)
  956                 (void)ipmi_set_watchdog(sc, 0);
  957         /*
  958          * Power cycle the system via IPMI. We register at SHUTDOWN_PRI_LAST - 2
  959          * because we only handle power-cycle reboots and leave all other kinds
  960          * to later handlers. We only try this if the BMC supports the Chassis device.
  961          */
  962         if (sc->ipmi_dev_support & IPMI_ADS_CHASSIS) {
  963                 device_printf(dev, "Establishing power cycle handler\n");
  964                 sc->ipmi_power_cycle_tag = EVENTHANDLER_REGISTER(shutdown_final,
  965                     ipmi_power_cycle, sc, SHUTDOWN_PRI_LAST - 2);
  966         }
  967 }
  968 
  969 int
  970 ipmi_attach(device_t dev)
  971 {
  972         struct ipmi_softc *sc = device_get_softc(dev);
  973         int error;
  974 
  975         if (sc->ipmi_irq_res != NULL && sc->ipmi_intr != NULL) {
  976                 error = bus_setup_intr(dev, sc->ipmi_irq_res, INTR_TYPE_MISC,
  977                     NULL, sc->ipmi_intr, sc, &sc->ipmi_irq);
  978                 if (error) {
  979                         device_printf(dev, "can't set up interrupt\n");
  980                         return (error);
  981                 }
  982         }
  983 
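              /*
               * Defer the remaining setup, which talks to the BMC and may
               * sleep, to ipmi_startup(); it runs from an interrupt config
               * hook once interrupts are available during boot.
               */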
  984         bzero(&sc->ipmi_ich, sizeof(struct intr_config_hook));
  985         sc->ipmi_ich.ich_func = ipmi_startup;
  986         sc->ipmi_ich.ich_arg = sc;
  987         if (config_intrhook_establish(&sc->ipmi_ich) != 0) {
  988                 device_printf(dev, "can't establish configuration hook\n");
  989                 return (ENOMEM);
  990         }
  991 
  992         ipmi_attached = 1;
  993         return (0);
  994 }
  995 
  996 int
  997 ipmi_detach(device_t dev)
  998 {
  999         struct ipmi_softc *sc;
 1000 
 1001         sc = device_get_softc(dev);
 1002 
 1003         /* Fail if there are any open handles. */
 1004         IPMI_LOCK(sc);
 1005         if (sc->ipmi_opened) {
 1006                 IPMI_UNLOCK(sc);
 1007                 return (EBUSY);
 1008         }
 1009         IPMI_UNLOCK(sc);
 1010         if (sc->ipmi_cdev)
 1011                 destroy_dev(sc->ipmi_cdev);
 1012 
 1013         /* Detach from watchdog handling and turn off watchdog. */
 1014         if (sc->ipmi_shutdown_tag)
 1015                 EVENTHANDLER_DEREGISTER(shutdown_pre_sync,
 1016                 sc->ipmi_shutdown_tag);
 1017         if (sc->ipmi_watchdog_tag) {
 1018                 EVENTHANDLER_DEREGISTER(watchdog_list, sc->ipmi_watchdog_tag);
 1019                 ipmi_set_watchdog(sc, 0);
 1020         }
 1021 
 1022         /* Detach from shutdown handling for power cycle reboot */
 1023         if (sc->ipmi_power_cycle_tag)
 1024                 EVENTHANDLER_DEREGISTER(shutdown_final, sc->ipmi_power_cycle_tag);
 1025 
 1026         /* XXX: should use shutdown callout I think. */
 1027         /* If the backend uses a kthread, shut it down. */
 1028         IPMI_LOCK(sc);
 1029         sc->ipmi_detaching = 1;
 1030         if (sc->ipmi_kthread) {
 1031                 cv_broadcast(&sc->ipmi_request_added);
 1032                 msleep(sc->ipmi_kthread, &sc->ipmi_requests_lock, 0,
 1033                     "ipmi_wait", 0);
 1034         }
 1035         IPMI_UNLOCK(sc);
 1036         if (sc->ipmi_irq)
 1037                 bus_teardown_intr(dev, sc->ipmi_irq_res, sc->ipmi_irq);
 1038 
 1039         ipmi_release_resources(dev);
 1040         mtx_destroy(&sc->ipmi_io_lock);
 1041         mtx_destroy(&sc->ipmi_requests_lock);
 1042         return (0);
 1043 }
 1044 
 1045 void
 1046 ipmi_release_resources(device_t dev)
 1047 {
 1048         struct ipmi_softc *sc;
 1049         int i;
 1050 
 1051         sc = device_get_softc(dev);
 1052         if (sc->ipmi_irq)
 1053                 bus_teardown_intr(dev, sc->ipmi_irq_res, sc->ipmi_irq);
 1054         if (sc->ipmi_irq_res)
 1055                 bus_release_resource(dev, SYS_RES_IRQ, sc->ipmi_irq_rid,
 1056                     sc->ipmi_irq_res);
 1057         for (i = 0; i < MAX_RES; i++)
 1058                 if (sc->ipmi_io_res[i])
 1059                         bus_release_resource(dev, sc->ipmi_io_type,
 1060                             sc->ipmi_io_rid + i, sc->ipmi_io_res[i]);
 1061 }
 1062 
 1063 /* XXX: Why? */
 1064 static void
 1065 ipmi_unload(void *arg)
 1066 {
 1067         device_t *      devs;
 1068         int             count;
 1069         int             i;
 1070 
 1071         if (devclass_get_devices(devclass_find("ipmi"), &devs, &count) != 0)
 1072                 return;
 1073         for (i = 0; i < count; i++)
 1074                 device_delete_child(device_get_parent(devs[i]), devs[i]);
 1075         free(devs, M_TEMP);
 1076 }
 1077 SYSUNINIT(ipmi_unload, SI_SUB_DRIVERS, SI_ORDER_FIRST, ipmi_unload, NULL);
 1078 
 1079 #ifdef IMPI_DEBUG
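      /*
       * Debug helper: print a hex and ASCII dump of a buffer, 16 bytes per
       * line, capped at 256 bytes.
       */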
 1080 static void
 1081 dump_buf(u_char *data, int len)
 1082 {
 1083         char buf[20];
 1084         char line[1024];
 1085         char temp[30];
 1086         int count = 0;
 1087         int i=0;
 1088 
 1089         printf("Address %p len %d\n", data, len);
 1090         if (len > 256)
 1091                 len = 256;
 1092         line[0] = '\000';
 1093         for (; len > 0; len--, data++) {
 1094                 sprintf(temp, "%02x ", *data);
 1095                 strcat(line, temp);
 1096                 if (*data >= ' ' && *data <= '~')
 1097                         buf[count] = *data;
 1098                 else if (*data >= 'A' && *data <= 'Z')
 1099                         buf[count] = *data;
 1100                 else
 1101                         buf[count] = '.';
 1102                 if (++count == 16) {
 1103                         buf[count] = '\000';
 1104                         count = 0;
 1105                         printf("  %3x  %s %s\n", i, line, buf);
 1106                         i+=16;
 1107                         line[0] = '\000';
 1108                 }
 1109         }
 1110         buf[count] = '\000';
 1111 
 1112         for (; count != 16; count++) {
 1113                 strcat(line, "   ");
 1114         }
 1115         printf("  %3x  %s %s\n", i, line, buf);
 1116 }
 1117 #endif
