1 /******************************************************************************
2 * xenstore.c
3 *
4 * Low-level kernel interface to the XenStore.
5 *
6 * Copyright (C) 2005 Rusty Russell, IBM Corporation
7 * Copyright (C) 2009,2010 Spectra Logic Corporation
8 *
9 * This file may be distributed separately from the Linux kernel, or
10 * incorporated into other software packages, subject to the following license:
11 *
12 * Permission is hereby granted, free of charge, to any person obtaining a copy
13 * of this source file (the "Software"), to deal in the Software without
14 * restriction, including without limitation the rights to use, copy, modify,
15 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
16 * and to permit persons to whom the Software is furnished to do so, subject to
17 * the following conditions:
18 *
19 * The above copyright notice and this permission notice shall be included in
20 * all copies or substantial portions of the Software.
21 *
22 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
23 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
24 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
25 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
26 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
27 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
28 * IN THE SOFTWARE.
29 */
30
31
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD: releng/8.2/sys/xen/xenstore/xenstore.c 214077 2010-10-19 20:53:30Z gibbs $");
34
35 #include <sys/param.h>
36 #include <sys/bus.h>
37 #include <sys/kernel.h>
38 #include <sys/lock.h>
39 #include <sys/module.h>
40 #include <sys/mutex.h>
41 #include <sys/sx.h>
42 #include <sys/syslog.h>
43 #include <sys/malloc.h>
44 #include <sys/systm.h>
45 #include <sys/proc.h>
46 #include <sys/kthread.h>
47 #include <sys/sbuf.h>
48 #include <sys/sysctl.h>
49 #include <sys/uio.h>
50 #include <sys/unistd.h>
51
52 #include <machine/xen/xen-os.h>
53 #include <machine/stdarg.h>
54
55 #include <xen/evtchn.h>
56 #include <xen/gnttab.h>
57 #include <xen/hypervisor.h>
58 #include <xen/xen_intr.h>
59
60 #include <xen/interface/hvm/params.h>
61
62 #include <xen/xenstore/xenstorevar.h>
63 #include <xen/xenstore/xenstore_internal.h>
64
65 #include <vm/vm.h>
66 #include <vm/pmap.h>
67
68 /**
69 * \file xenstore.c
70 * \brief XenStore interface
71 *
72 * The XenStore interface is a simple storage system that is a means of
73 * communicating state and configuration data between the Xen Domain 0
74 * and the various guest domains. All configuration data other than
75 * a small amount of essential information required during the early
76 * boot process of launching a Xen aware guest, is managed using the
77 * XenStore.
78 *
79 * The XenStore is ASCII string based, and has a structure and semantics
80 * similar to a filesystem. There are files and directories, the directories
 * able to contain files or other directories. The depth of the hierarchy
 * is only limited by the XenStore's maximum path length.
83 *
84 * The communication channel between the XenStore service and other
85 * domains is via two, guest specific, ring buffers in a shared memory
86 * area. One ring buffer is used for communicating in each direction.
87 * The grant table references for this shared memory are given to the
88 * guest either via the xen_start_info structure for a fully para-
89 * virtualized guest, or via HVM hypercalls for a hardware virtualized
90 * guest.
91 *
92 * The XenStore communication relies on an event channel and thus
93 * interrupts. For this reason, the attachment of the XenStore
94 * relies on an interrupt driven configuration hook to hold off
95 * boot processing until communication with the XenStore service
96 * can be established.
97 *
98 * Several Xen services depend on the XenStore, most notably the
99 * XenBus used to discover and manage Xen devices. These services
100 * are implemented as NewBus child attachments to a bus exported
101 * by this XenStore driver.
102 */
103
104 static struct xs_watch *find_watch(const char *token);
105
106 MALLOC_DEFINE(M_XENSTORE, "xenstore", "XenStore data and results");
107
108 /**
109 * Pointer to shared memory communication structures allowing us
110 * to communicate with the XenStore service.
111 *
112 * When operating in full PV mode, this pointer is set early in kernel
113 * startup from within xen_machdep.c. In HVM mode, we use hypercalls
114 * to get the guest frame number for the shared page and then map it
115 * into kva. See xs_init() for details.
116 */
117 struct xenstore_domain_interface *xen_store;
118
119 /*-------------------------- Private Data Structures ------------------------*/
120
/**
 * Structure capturing messages received from the XenStore service.
 *
 * A message is either a reply to a request we issued (queued on
 * xs.reply_list) or an asynchronous watch event (queued on
 * xs.watch_events).  Only one arm of the union is valid, selected
 * by hdr.type (XS_WATCH_EVENT selects the watch arm).
 */
struct xs_stored_msg {
	/* Linkage for xs.reply_list or xs.watch_events. */
	TAILQ_ENTRY(xs_stored_msg) list;

	/* Wire-format header as received from the XenStore service. */
	struct xsd_sockmsg hdr;

	union {
		/* Queued replies. */
		struct {
			/* Malloced (M_XENSTORE) body; freed by consumer. */
			char *body;
		} reply;

		/* Queued watch events. */
		struct {
			/* The registered watch this event fired for. */
			struct xs_watch *handle;
			/* String vector from the event (see split()). */
			const char **vec;
			/* Number of entries in vec. */
			u_int vec_size;
		} watch;
	} u;
};
TAILQ_HEAD(xs_stored_msg_list, xs_stored_msg);
144
/**
 * Container for all XenStore related state.
 */
struct xs_softc {
	/** Newbus device for the XenStore. */
	device_t xs_dev;

	/**
	 * Lock serializing access to ring producer/consumer
	 * indexes.  Use of this lock guarantees that wakeups
	 * of blocking readers/writers are not missed due to
	 * races with the XenStore service.
	 */
	struct mtx ring_lock;

	/*
	 * Mutex used to ensure exclusive access to the outgoing
	 * communication ring.  We use a lock type that can be
	 * held while sleeping so that xs_write() can block waiting
	 * for space in the ring to free up, without allowing another
	 * writer to come in and corrupt a partial message write.
	 */
	struct sx request_mutex;

	/**
	 * A list of replies to our requests.
	 *
	 * The reply list is filled by xs_rcv_thread().  It
	 * is consumed by the context that issued the request
	 * to which a reply is made.  The requester blocks in
	 * xs_read_reply().
	 *
	 * \note Only one requesting context can be active at a time.
	 *       This is guaranteed by the request_mutex and ensures
	 *       that the requester sees replies matching the order
	 *       of its requests.
	 */
	struct xs_stored_msg_list reply_list;

	/** Lock protecting the reply list. */
	struct mtx reply_lock;

	/**
	 * List of registered watches.
	 */
	struct xs_watch_list registered_watches;

	/** Lock protecting the registered watches list. */
	struct mtx registered_watches_lock;

	/**
	 * List of pending watch callback events.
	 */
	struct xs_stored_msg_list watch_events;

	/** Lock protecting the watch callback list. */
	struct mtx watch_events_lock;

	/**
	 * Sleepable lock used to prevent VM suspension while a
	 * xenstore transaction is outstanding.
	 *
	 * Each active transaction holds a shared lock on the
	 * suspend mutex.  Our suspend method blocks waiting
	 * to acquire an exclusive lock.  This guarantees that
	 * suspend processing will only proceed once all active
	 * transactions have been retired.
	 */
	struct sx suspend_mutex;

	/**
	 * The process id of the xenwatch thread.
	 */
	pid_t xenwatch_pid;

	/**
	 * Sleepable mutex used to gate the execution of XenStore
	 * watch event callbacks.
	 *
	 * xenwatch_thread holds an exclusive lock on this mutex
	 * while delivering event callbacks, and xenstore_unregister_watch()
	 * uses an exclusive lock of this mutex to guarantee that no
	 * callbacks of the just unregistered watch are pending
	 * before returning to its caller.
	 */
	struct sx xenwatch_mutex;

#ifdef XENHVM
	/**
	 * The HVM guest pseudo-physical frame number.  This is Xen's mapping
	 * of the true machine frame number into our "physical address space".
	 */
	unsigned long gpfn;
#endif

	/**
	 * The event channel for communicating with the
	 * XenStore service.
	 */
	int evtchn;

	/** Interrupt number for our event channel. */
	u_int irq;

	/**
	 * Interrupt driven config hook allowing us to defer
	 * attaching children until interrupts (and thus communication
	 * with the XenStore service) are available.
	 */
	struct intr_config_hook xs_attachcb;
};
256
257 /*-------------------------------- Global Data ------------------------------*/
258 static struct xs_softc xs;
259
260 /*------------------------- Private Utility Functions -----------------------*/
261
/**
 * Count, and optionally record the locations of, the NUL terminated
 * strings packed into a buffer.
 *
 * \param strings A pointer to a contiguous buffer of NUL terminated strings.
 * \param dest    If non-NULL, an array that receives a pointer to each
 *                string found.
 * \param len     The number of bytes in the strings buffer.
 *
 * \return A count of the number of strings found.
 */
static u_int
extract_strings(const char *strings, const char **dest, u_int len)
{
	const char *cur;
	const char *end;
	u_int count;

	count = 0;
	end = strings + len;
	for (cur = strings; cur < end; cur += strlen(cur) + 1) {
		count++;
		if (dest != NULL)
			*dest++ = cur;
	}

	return (count);
}
286
/**
 * Convert a contiguous buffer containing a series of NUL terminated
 * strings into an array of pointers to strings.
 *
 * The returned pointer references the array of string pointers which
 * is followed by the storage for the string data.  It is the client's
 * responsibility to free this storage; a single free of the returned
 * pointer releases both the pointer array and the copied string data.
 *
 * The storage addressed by strings is free'd prior to split returning.
 *
 * \param strings A pointer to a contiguous buffer of NUL terminated strings.
 *                Must be M_XENSTORE storage; ownership is taken (and the
 *                buffer freed) by this function.
 * \param len     The length of the buffer pointed to by strings.
 * \param num     The number of strings found and returned in the strings
 *                array.
 *
 * \return  An array of pointers to the strings found in the input buffer.
 */
static const char **
split(char *strings, u_int len, u_int *num)
{
	const char **ret;

	/* Protect against unterminated buffers. */
	strings[len - 1] = '\0';

	/* Count the strings. */
	*num = extract_strings(strings, /*dest*/NULL, len);

	/*
	 * Transfer to one big alloc for easy freeing by the caller.
	 * Layout: *num pointers, immediately followed by the string data.
	 */
	ret = malloc(*num * sizeof(char *) + len, M_XENSTORE, M_WAITOK);
	memcpy(&ret[*num], strings, len);
	free(strings, M_XENSTORE);

	/* Extract pointers to newly allocated array. */
	strings = (char *)&ret[*num];
	(void)extract_strings(strings, /*dest*/ret, len);

	return (ret);
}
326
/*------------------------- Public Utility Functions -------------------------*/
/*------- API comments for these methods can be found in xenstorevar.h -------*/
struct sbuf *
xs_join(const char *dir, const char *name)
{
	struct sbuf *path;

	/* Concatenate dir and name, inserting a '/' only when name is set. */
	path = sbuf_new_auto();
	sbuf_cat(path, dir);
	if (*name != '\0') {
		sbuf_putc(path, '/');
		sbuf_cat(path, name);
	}
	sbuf_finish(path);

	return (path);
}
344
/*-------------------- Low Level Communication Management --------------------*/
/**
 * Interrupt handler for the XenStore event channel.
 *
 * XenStore reads and writes block on "xen_store" for buffer
 * space.  Wakeup any blocking operations when the XenStore
 * service has modified the queues.
 *
 * \param arg  Unused interrupt handler argument.
 */
static void
xs_intr(void * arg __unused /*__attribute__((unused))*/)
{

	/*
	 * Hold ring lock across wakeup so that clients
	 * cannot miss a wakeup.
	 */
	mtx_lock(&xs.ring_lock);
	wakeup(xen_store);
	mtx_unlock(&xs.ring_lock);
}
365
366 /**
367 * Verify that the indexes for a ring are valid.
368 *
369 * The difference between the producer and consumer cannot
370 * exceed the size of the ring.
371 *
372 * \param cons The consumer index for the ring to test.
373 * \param prod The producer index for the ring to test.
374 *
375 * \retval 1 If indexes are in range.
376 * \retval 0 If the indexes are out of range.
377 */
378 static int
379 xs_check_indexes(XENSTORE_RING_IDX cons, XENSTORE_RING_IDX prod)
380 {
381
382 return ((prod - cons) <= XENSTORE_RING_SIZE);
383 }
384
385 /**
386 * Return a pointer to, and the length of, the contiguous
387 * free region available for output in a ring buffer.
388 *
389 * \param cons The consumer index for the ring.
390 * \param prod The producer index for the ring.
391 * \param buf The base address of the ring's storage.
392 * \param len The amount of contiguous storage available.
393 *
394 * \return A pointer to the start location of the free region.
395 */
396 static void *
397 xs_get_output_chunk(XENSTORE_RING_IDX cons, XENSTORE_RING_IDX prod,
398 char *buf, uint32_t *len)
399 {
400
401 *len = XENSTORE_RING_SIZE - MASK_XENSTORE_IDX(prod);
402 if ((XENSTORE_RING_SIZE - (prod - cons)) < *len)
403 *len = XENSTORE_RING_SIZE - (prod - cons);
404 return (buf + MASK_XENSTORE_IDX(prod));
405 }
406
407 /**
408 * Return a pointer to, and the length of, the contiguous
409 * data available to read from a ring buffer.
410 *
411 * \param cons The consumer index for the ring.
412 * \param prod The producer index for the ring.
413 * \param buf The base address of the ring's storage.
414 * \param len The amount of contiguous data available to read.
415 *
416 * \return A pointer to the start location of the available data.
417 */
418 static const void *
419 xs_get_input_chunk(XENSTORE_RING_IDX cons, XENSTORE_RING_IDX prod,
420 const char *buf, uint32_t *len)
421 {
422
423 *len = XENSTORE_RING_SIZE - MASK_XENSTORE_IDX(cons);
424 if ((prod - cons) < *len)
425 *len = prod - cons;
426 return (buf + MASK_XENSTORE_IDX(cons));
427 }
428
/**
 * Transmit data to the XenStore service.
 *
 * \param tdata  A pointer to the contiguous data to send.
 * \param len    The amount of data to send.
 *
 * \return  On success 0, otherwise an errno value indicating the
 *          cause of failure.
 *
 * \invariant  Called from thread context.
 * \invariant  The buffer pointed to by tdata is at least len bytes
 *             in length.
 * \invariant  xs.request_mutex exclusively locked (guarantees a
 *             partial message is never interleaved with another
 *             writer's data).
 */
static int
xs_write_store(const void *tdata, unsigned len)
{
	XENSTORE_RING_IDX cons, prod;
	const char *data = (const char *)tdata;
	int error;

	sx_assert(&xs.request_mutex, SX_XLOCKED);
	while (len != 0) {
		void *dst;
		u_int avail;

		/* Hold lock so we can't miss wakeups should we block. */
		mtx_lock(&xs.ring_lock);
		cons = xen_store->req_cons;
		prod = xen_store->req_prod;
		if ((prod - cons) == XENSTORE_RING_SIZE) {
			/*
			 * Output ring is full.  Wait for a ring event.
			 *
			 * Note that the events from both queues
			 * are combined, so being woken does not
			 * guarantee that data exist in the read
			 * ring.
			 *
			 * To simplify error recovery and the retry,
			 * we specify PDROP so our lock is *not* held
			 * when msleep returns.
			 */
			error = msleep(xen_store, &xs.ring_lock, PCATCH|PDROP,
			    "xbwrite", /*timeout*/0);
			if (error && error != EWOULDBLOCK)
				return (error);

			/* Try again. */
			continue;
		}
		mtx_unlock(&xs.ring_lock);

		/* Verify queue sanity; reset the ring on corruption. */
		if (!xs_check_indexes(cons, prod)) {
			xen_store->req_cons = xen_store->req_prod = 0;
			return (EIO);
		}

		/* Copy as much as fits in the contiguous free region. */
		dst = xs_get_output_chunk(cons, prod, xen_store->req, &avail);
		if (avail > len)
			avail = len;

		memcpy(dst, data, avail);
		data += avail;
		len -= avail;

		/*
		 * The store to the producer index, which indicates
		 * to the other side that new data has arrived, must
		 * be visible only after our copy of the data into the
		 * ring has completed.
		 */
		wmb();
		xen_store->req_prod += avail;

		/*
		 * notify_remote_via_evtchn implies mb().  The other side
		 * will see the change to req_prod at the time of the
		 * interrupt.
		 */
		notify_remote_via_evtchn(xs.evtchn);
	}

	return (0);
}
515
/**
 * Receive data from the XenStore service.
 *
 * \param tdata  A pointer to the contiguous buffer to receive the data.
 * \param len    The amount of data to receive.
 *
 * \return  On success 0, otherwise an errno value indicating the
 *          cause of failure.
 *
 * \invariant  Called from thread context.
 * \invariant  The buffer pointed to by tdata is at least len bytes
 *             in length.
 *
 * \note  xs_read does not perform any internal locking to guarantee
 *        serial access to the incoming ring buffer.  However, there
 *        is only one context processing reads: xs_rcv_thread().
 */
static int
xs_read_store(void *tdata, unsigned len)
{
	XENSTORE_RING_IDX cons, prod;
	char *data = (char *)tdata;
	int error;

	while (len != 0) {
		u_int avail;
		const char *src;

		/* Hold lock so we can't miss wakeups should we block. */
		mtx_lock(&xs.ring_lock);
		cons = xen_store->rsp_cons;
		prod = xen_store->rsp_prod;
		if (cons == prod) {
			/*
			 * Nothing to read.  Wait for a ring event.
			 *
			 * Note that the events from both queues
			 * are combined, so being woken does not
			 * guarantee that data exist in the read
			 * ring.
			 *
			 * To simplify error recovery and the retry,
			 * we specify PDROP so our lock is *not* held
			 * when msleep returns.
			 */
			error = msleep(xen_store, &xs.ring_lock, PCATCH|PDROP,
			    "xbread", /*timeout*/0);
			if (error && error != EWOULDBLOCK)
				return (error);
			continue;
		}
		mtx_unlock(&xs.ring_lock);

		/* Verify queue sanity; reset the ring on corruption. */
		if (!xs_check_indexes(cons, prod)) {
			xen_store->rsp_cons = xen_store->rsp_prod = 0;
			return (EIO);
		}

		src = xs_get_input_chunk(cons, prod, xen_store->rsp, &avail);
		if (avail > len)
			avail = len;

		/*
		 * Ensure the data we read is related to the indexes
		 * we read above.
		 */
		rmb();

		memcpy(data, src, avail);
		data += avail;
		len -= avail;

		/*
		 * Ensure that the producer of this ring does not see
		 * the ring space as free until after we have copied it
		 * out.
		 */
		mb();
		xen_store->rsp_cons += avail;

		/*
		 * notify_remote_via_evtchn implies mb().  The producer
		 * will see the updated consumer index when the event
		 * is delivered.
		 */
		notify_remote_via_evtchn(xs.evtchn);
	}

	return (0);
}
607
608 /*----------------------- Received Message Processing ------------------------*/
609 /**
610 * Block reading the next message from the XenStore service and
611 * process the result.
612 *
613 * \param type The returned type of the XenStore message received.
614 *
615 * \return 0 on success. Otherwise an errno value indicating the
616 * type of failure encountered.
617 */
618 static int
619 xs_process_msg(enum xsd_sockmsg_type *type)
620 {
621 struct xs_stored_msg *msg;
622 char *body;
623 int error;
624
625 msg = malloc(sizeof(*msg), M_XENSTORE, M_WAITOK);
626 error = xs_read_store(&msg->hdr, sizeof(msg->hdr));
627 if (error) {
628 free(msg, M_XENSTORE);
629 return (error);
630 }
631
632 body = malloc(msg->hdr.len + 1, M_XENSTORE, M_WAITOK);
633 error = xs_read_store(body, msg->hdr.len);
634 if (error) {
635 free(body, M_XENSTORE);
636 free(msg, M_XENSTORE);
637 return (error);
638 }
639 body[msg->hdr.len] = '\0';
640
641 *type = msg->hdr.type;
642 if (msg->hdr.type == XS_WATCH_EVENT) {
643 msg->u.watch.vec = split(body, msg->hdr.len,
644 &msg->u.watch.vec_size);
645
646 mtx_lock(&xs.registered_watches_lock);
647 msg->u.watch.handle = find_watch(
648 msg->u.watch.vec[XS_WATCH_TOKEN]);
649 if (msg->u.watch.handle != NULL) {
650 mtx_lock(&xs.watch_events_lock);
651 TAILQ_INSERT_TAIL(&xs.watch_events, msg, list);
652 wakeup(&xs.watch_events);
653 mtx_unlock(&xs.watch_events_lock);
654 } else {
655 free(msg->u.watch.vec, M_XENSTORE);
656 free(msg, M_XENSTORE);
657 }
658 mtx_unlock(&xs.registered_watches_lock);
659 } else {
660 msg->u.reply.body = body;
661 mtx_lock(&xs.reply_lock);
662 TAILQ_INSERT_TAIL(&xs.reply_list, msg, list);
663 wakeup(&xs.reply_list);
664 mtx_unlock(&xs.reply_lock);
665 }
666
667 return (0);
668 }
669
/**
 * Thread body of the XenStore receive thread.
 *
 * This thread blocks waiting for data from the XenStore service
 * and processes any received messages.  Errors are logged and the
 * loop continues; the thread never exits.
 *
 * \param arg  Unused thread argument.
 */
static void
xs_rcv_thread(void *arg __unused)
{
	int error;
	enum xsd_sockmsg_type type;

	for (;;) {
		error = xs_process_msg(&type);
		if (error)
			printf("XENSTORE error %d while reading message\n",
			    error);
	}
}
689
/*---------------- XenStore Message Request/Reply Processing -----------------*/
/**
 * Filter invoked before transmitting any message to the XenStore service.
 *
 * The role of the filter may expand, but currently serves to manage
 * the interactions of messages with transaction state.
 *
 * Starting a transaction takes a shared lock on xs.suspend_mutex,
 * holding off suspend processing until the transaction is retired
 * (the matching release is in xs_reply_filter()).
 *
 * \param request_msg_type  The message type for the request.
 */
static inline void
xs_request_filter(uint32_t request_msg_type)
{
	if (request_msg_type == XS_TRANSACTION_START)
		sx_slock(&xs.suspend_mutex);
}
705
/**
 * Filter invoked after transmitting any message to the XenStore service.
 *
 * The role of the filter may expand, but currently serves to manage
 * the interactions of messages with transaction state.
 *
 * \param request_msg_type     The message type for the original request.
 * \param reply_msg_type       The message type for any received reply.
 * \param request_reply_error  The error status from the attempt to send
 *                             the request or retrieve the reply.
 */
static inline void
xs_reply_filter(uint32_t request_msg_type,
    uint32_t reply_msg_type, int request_reply_error)
{
	/*
	 * The count of transactions drops if we attempted
	 * to end a transaction (even if that attempt fails
	 * in error), we receive a transaction end acknowledgement,
	 * or if our attempt to begin a transaction fails.
	 */
	if (request_msg_type == XS_TRANSACTION_END
	 || (request_reply_error == 0 && reply_msg_type == XS_TRANSACTION_END)
	 || (request_msg_type == XS_TRANSACTION_START
	  && (request_reply_error != 0 || reply_msg_type == XS_ERROR)))
		sx_sunlock(&xs.suspend_mutex);

}
734
735 #define xsd_error_count (sizeof(xsd_errors) / sizeof(xsd_errors[0]))
736
737 /**
738 * Convert a XenStore error string into an errno number.
739 *
740 * \param errorstring The error string to convert.
741 *
742 * \return The errno best matching the input string.
743 *
744 * \note Unknown error strings are converted to EINVAL.
745 */
746 static int
747 xs_get_error(const char *errorstring)
748 {
749 u_int i;
750
751 for (i = 0; i < xsd_error_count; i++) {
752 if (!strcmp(errorstring, xsd_errors[i].errstring))
753 return (xsd_errors[i].errnum);
754 }
755 log(LOG_WARNING, "XENSTORE xen store gave: unknown error %s",
756 errorstring);
757 return (EINVAL);
758 }
759
/**
 * Block waiting for a reply to a message request.
 *
 * \param type    The returned type of the reply.
 * \param len     The returned body length of the reply.  May be NULL
 *                if the caller does not need the length.
 * \param result  The returned body of the reply.  M_XENSTORE storage
 *                the caller must free.
 *
 * \return  0 on success.  Otherwise an errno indicating the
 *          cause of failure.
 */
static int
xs_read_reply(enum xsd_sockmsg_type *type, u_int *len, void **result)
{
	struct xs_stored_msg *msg;
	char *body;
	int error;

	mtx_lock(&xs.reply_lock);
	while (TAILQ_EMPTY(&xs.reply_list)) {
		/*
		 * Sleep with a finite (hz/10) timeout; an EWOULDBLOCK
		 * timeout simply re-checks the list for a reply.
		 */
		error = mtx_sleep(&xs.reply_list, &xs.reply_lock,
		    PCATCH, "xswait", hz/10);
		if (error && error != EWOULDBLOCK) {
			mtx_unlock(&xs.reply_lock);
			return (error);
		}
	}
	msg = TAILQ_FIRST(&xs.reply_list);
	TAILQ_REMOVE(&xs.reply_list, msg, list);
	mtx_unlock(&xs.reply_lock);

	*type = msg->hdr.type;
	if (len)
		*len = msg->hdr.len;
	body = msg->u.reply.body;

	/* Free the message wrapper; the body passes to the caller. */
	free(msg, M_XENSTORE);
	*result = body;
	return (0);
}
799
/**
 * Pass-thru interface for XenStore access by userland processes
 * via the XenStore device.
 *
 * Reply type and length data are returned by overwriting these
 * fields in the passed in request message.
 *
 * \param msg     A properly formatted message to transmit to
 *                the XenStore service.
 * \param result  The returned body of the reply.
 *
 * \return  0 on success.  Otherwise an errno indicating the cause
 *          of failure.
 *
 * \note  The returned result is provided in malloced storage and thus
 *        must be free'd by the caller with 'free(result, M_XENSTORE)'.
 */
int
xs_dev_request_and_reply(struct xsd_sockmsg *msg, void **result)
{
	uint32_t request_type;
	int error;

	/* Record the type before xs_read_reply() overwrites msg->type. */
	request_type = msg->type;
	xs_request_filter(request_type);

	sx_xlock(&xs.request_mutex);
	if ((error = xs_write_store(msg, sizeof(*msg) + msg->len)) == 0)
		error = xs_read_reply(&msg->type, &msg->len, result);
	sx_xunlock(&xs.request_mutex);

	xs_reply_filter(request_type, msg->type, error);

	return (error);
}
835
/**
 * Send a message with an optionally multi-part body to the XenStore service.
 *
 * \param t             The transaction to use for this request.
 * \param request_type  The type of message to send.
 * \param iovec         Pointers to the body sections of the request.
 * \param num_vecs      The number of body sections in the request.
 * \param len           The returned length of the reply.  May be NULL.
 * \param result        The returned body of the reply.  May be NULL if
 *                      the caller does not need the body.
 *
 * \return  0 on success.  Otherwise an errno indicating
 *          the cause of failure.
 *
 * \note  The returned result is provided in malloced storage and thus
 *        must be free'd by the caller with 'free(*result, M_XENSTORE)'.
 */
static int
xs_talkv(struct xs_transaction t, enum xsd_sockmsg_type request_type,
    const struct iovec *iovec, u_int num_vecs, u_int *len, void **result)
{
	struct xsd_sockmsg msg;
	void *ret = NULL;
	u_int i;
	int error;

	msg.tx_id = t.id;
	msg.req_id = 0;
	msg.type = request_type;
	/* Total body length is the sum of all iovec segments. */
	msg.len = 0;
	for (i = 0; i < num_vecs; i++)
		msg.len += iovec[i].iov_len;

	xs_request_filter(request_type);

	/* Header and body must be written as one uninterrupted message. */
	sx_xlock(&xs.request_mutex);
	error = xs_write_store(&msg, sizeof(msg));
	if (error) {
		printf("xs_talkv failed %d\n", error);
		goto error_lock_held;
	}

	for (i = 0; i < num_vecs; i++) {
		error = xs_write_store(iovec[i].iov_base, iovec[i].iov_len);
		if (error) {
			printf("xs_talkv failed %d\n", error);
			goto error_lock_held;
		}
	}

	error = xs_read_reply(&msg.type, len, &ret);

error_lock_held:
	sx_xunlock(&xs.request_mutex);
	/* msg.type now holds the reply type (or request type on error). */
	xs_reply_filter(request_type, msg.type, error);
	if (error)
		return (error);

	if (msg.type == XS_ERROR) {
		error = xs_get_error(ret);
		free(ret, M_XENSTORE);
		return (error);
	}

	/* Reply is either error or an echo of our request message type. */
	KASSERT(msg.type == request_type, ("bad xenstore message type"));

	if (result)
		*result = ret;
	else
		free(ret, M_XENSTORE);

	return (0);
}
909
910 /**
911 * Wrapper for xs_talkv allowing easy transmission of a message with
912 * a single, contiguous, message body.
913 *
914 * \param t The transaction to use for this request.
915 * \param request_type The type of message to send.
916 * \param body The body of the request.
917 * \param len The returned length of the reply.
918 * \param result The returned body of the reply.
919 *
920 * \return 0 on success. Otherwise an errno indicating
921 * the cause of failure.
922 *
923 * \note The returned result is provided in malloced storage and thus
924 * must be free'd by the caller with 'free(*result, M_XENSTORE);
925 */
926 static int
927 xs_single(struct xs_transaction t, enum xsd_sockmsg_type request_type,
928 const char *body, u_int *len, void **result)
929 {
930 struct iovec iovec;
931
932 iovec.iov_base = (void *)(uintptr_t)body;
933 iovec.iov_len = strlen(body) + 1;
934
935 return (xs_talkv(t, request_type, &iovec, 1, len, result));
936 }
937
938 /*------------------------- XenStore Watch Support ---------------------------*/
939 /**
940 * Transmit a watch request to the XenStore service.
941 *
942 * \param path The path in the XenStore to watch.
943 * \param tocken A unique identifier for this watch.
944 *
945 * \return 0 on success. Otherwise an errno indicating the
946 * cause of failure.
947 */
948 static int
949 xs_watch(const char *path, const char *token)
950 {
951 struct iovec iov[2];
952
953 iov[0].iov_base = (void *)(uintptr_t) path;
954 iov[0].iov_len = strlen(path) + 1;
955 iov[1].iov_base = (void *)(uintptr_t) token;
956 iov[1].iov_len = strlen(token) + 1;
957
958 return (xs_talkv(XST_NIL, XS_WATCH, iov, 2, NULL, NULL));
959 }
960
961 /**
962 * Transmit an uwatch request to the XenStore service.
963 *
964 * \param path The path in the XenStore to watch.
965 * \param tocken A unique identifier for this watch.
966 *
967 * \return 0 on success. Otherwise an errno indicating the
968 * cause of failure.
969 */
970 static int
971 xs_unwatch(const char *path, const char *token)
972 {
973 struct iovec iov[2];
974
975 iov[0].iov_base = (void *)(uintptr_t) path;
976 iov[0].iov_len = strlen(path) + 1;
977 iov[1].iov_base = (void *)(uintptr_t) token;
978 iov[1].iov_len = strlen(token) + 1;
979
980 return (xs_talkv(XST_NIL, XS_UNWATCH, iov, 2, NULL, NULL));
981 }
982
/**
 * Convert from watch token (unique identifier) to the associated
 * internal tracking structure for this watch.
 *
 * The token appears to be the ASCII hex encoding of the xs_watch
 * pointer itself (it is decoded with strtoul base 16 and compared
 * by pointer identity); the list walk validates that the decoded
 * pointer is still a registered watch.
 *
 * \param token  The unique identifier for the watch to find.
 *
 * \return  A pointer to the found watch structure or NULL.
 *
 * \invariant  Caller holds xs.registered_watches_lock.
 *             (assumption from the call in xs_process_msg — TODO confirm)
 */
static struct xs_watch *
find_watch(const char *token)
{
	struct xs_watch *i, *cmp;

	cmp = (void *)strtoul(token, NULL, 16);

	LIST_FOREACH(i, &xs.registered_watches, list)
		if (i == cmp)
			return (i);

	return (NULL);
}
1004
/**
 * Thread body of the XenStore watch event dispatch thread.
 *
 * Sleeps until watch events are queued (by xs_process_msg()) and
 * delivers each event's callback while holding xs.xenwatch_mutex,
 * which allows watch unregistration to drain in-flight callbacks.
 *
 * \param unused  Unused thread argument.
 */
static void
xenwatch_thread(void *unused)
{
	struct xs_stored_msg *msg;

	for (;;) {

		mtx_lock(&xs.watch_events_lock);
		while (TAILQ_EMPTY(&xs.watch_events))
			mtx_sleep(&xs.watch_events,
			    &xs.watch_events_lock,
			    PWAIT | PCATCH, "waitev", hz/10);

		/*
		 * Drop the events lock before taking the sleepable
		 * xenwatch mutex, then re-take it to dequeue; the list
		 * may have changed in the window, hence the NULL check.
		 */
		mtx_unlock(&xs.watch_events_lock);
		sx_xlock(&xs.xenwatch_mutex);

		mtx_lock(&xs.watch_events_lock);
		msg = TAILQ_FIRST(&xs.watch_events);
		if (msg)
			TAILQ_REMOVE(&xs.watch_events, msg, list);
		mtx_unlock(&xs.watch_events_lock);

		if (msg != NULL) {
			/*
			 * XXX There are messages coming in with a NULL
			 * XXX callback.  This deserves further investigation;
			 * XXX the workaround here simply prevents the kernel
			 * XXX from panic'ing on startup.
			 */
			if (msg->u.watch.handle->callback != NULL)
				msg->u.watch.handle->callback(
				    msg->u.watch.handle,
				    (const char **)msg->u.watch.vec,
				    msg->u.watch.vec_size);
			free(msg->u.watch.vec, M_XENSTORE);
			free(msg, M_XENSTORE);
		}

		sx_xunlock(&xs.xenwatch_mutex);
	}
}
1049
/*----------- XenStore Configuration, Initialization, and Control ------------*/
/**
 * Setup communication channels with the XenStore service.
 *
 * Resynchronizes the response ring if it was left non-quiescent
 * and (re)binds our interrupt handler to the XenStore event channel.
 *
 * \return  On success, 0.  Otherwise an errno value indicating the
 *          type of failure.
 */
static int
xs_init_comms(void)
{
	int error;

	if (xen_store->rsp_prod != xen_store->rsp_cons) {
		log(LOG_WARNING, "XENSTORE response ring is not quiescent "
		    "(%08x:%08x): fixing up\n",
		    xen_store->rsp_cons, xen_store->rsp_prod);
		xen_store->rsp_cons = xen_store->rsp_prod;
	}

	/* Release any stale binding before re-binding. */
	if (xs.irq)
		unbind_from_irqhandler(xs.irq);

	error = bind_caller_port_to_irqhandler(xs.evtchn, "xenstore",
	    xs_intr, NULL, INTR_TYPE_NET, &xs.irq);
	if (error) {
		log(LOG_WARNING, "XENSTORE request irq failed %i\n", error);
		return (error);
	}

	return (0);
}
1081
1082 /*------------------ Private Device Attachment Functions --------------------*/
/**
 * NewBus identify method: add a single "xenstore" child to the parent
 * bus so xs_probe()/xs_attach() will be invoked for it.
 */
static void
xs_identify(driver_t *driver, device_t parent)
{

	BUS_ADD_CHILD(parent, 0, "xenstore", 0);
}
1089
1090 /**
1091 * Probe for the existance of the XenStore.
1092 *
1093 * \param dev
1094 */
1095 static int
1096 xs_probe(device_t dev)
1097 {
1098 /*
1099 * We are either operating within a PV kernel or being probed
1100 * as the child of the successfully attached xenpci device.
1101 * Thus we are in a Xen environment and there will be a XenStore.
1102 * Uncontitionally return success.
1103 */
1104 device_set_desc(dev, "XenStore");
1105 printf("xs_probe: Probe retuns 0\n");
1106 return (0);
1107 }
1108
/**
 * Deferred portion of XenStore attachment.
 *
 * Invoked from the config_intrhook registered in xs_attach() once
 * interrupts are available: initializes the xenstore device node,
 * probes and attaches the child devices that depend on the XenStore,
 * then removes the hook so the boot process may continue.
 */
static void
xs_attach_deferred(void *arg)
{
	xs_dev_init();

	bus_generic_probe(xs.xs_dev);
	bus_generic_attach(xs.xs_dev);

	config_intrhook_disestablish(&xs.xs_attachcb);
}
1119
1120 /**
1121 * Attach to the XenStore.
1122 *
1123 * This routine also prepares for the probe/attach of drivers that rely
1124 * on the XenStore.
1125 */
1126 static int
1127 xs_attach(device_t dev)
1128 {
1129 int error;
1130
1131 /* Allow us to get device_t from softc and vice-versa. */
1132 xs.xs_dev = dev;
1133 device_set_softc(dev, &xs);
1134
1135 /*
1136 * This seems to be a layering violation. The XenStore is just
1137 * one of many clients of the Grant Table facility. It happens
1138 * to be the first and a gating consumer to all other devices,
1139 * so this does work. A better place would be in the PV support
1140 * code for fully PV kernels and the xenpci driver for HVM kernels.
1141 */
1142 error = gnttab_init();
1143 if (error != 0) {
1144 log(LOG_WARNING,
1145 "XENSTORE: Error initializing grant tables: %d\n", error);
1146 return (ENXIO);
1147 }
1148
1149 /* Initialize the interface to xenstore. */
1150 struct proc *p;
1151
1152 #ifdef XENHVM
1153 xs.evtchn = hvm_get_parameter(HVM_PARAM_STORE_EVTCHN);
1154 xs.gpfn = hvm_get_parameter(HVM_PARAM_STORE_PFN);
1155 xen_store = pmap_mapdev(xs.gpfn * PAGE_SIZE, PAGE_SIZE);
1156 #else
1157 xs.evtchn = xen_start_info->store_evtchn;
1158 #endif
1159
1160 TAILQ_INIT(&xs.reply_list);
1161 TAILQ_INIT(&xs.watch_events);
1162
1163 mtx_init(&xs.ring_lock, "ring lock", NULL, MTX_DEF);
1164 mtx_init(&xs.reply_lock, "reply lock", NULL, MTX_DEF);
1165 sx_init(&xs.xenwatch_mutex, "xenwatch");
1166 sx_init(&xs.request_mutex, "xenstore request");
1167 sx_init(&xs.suspend_mutex, "xenstore suspend");
1168 mtx_init(&xs.registered_watches_lock, "watches", NULL, MTX_DEF);
1169 mtx_init(&xs.watch_events_lock, "watch events", NULL, MTX_DEF);
1170 xs.irq = 0;
1171
1172 /* Initialize the shared memory rings to talk to xenstored */
1173 error = xs_init_comms();
1174 if (error)
1175 return (error);
1176
1177 error = kproc_create(xenwatch_thread, NULL, &p, RFHIGHPID,
1178 0, "xenwatch");
1179 if (error)
1180 return (error);
1181 xs.xenwatch_pid = p->p_pid;
1182
1183 error = kproc_create(xs_rcv_thread, NULL, NULL,
1184 RFHIGHPID, 0, "xenstore_rcv");
1185
1186 xs.xs_attachcb.ich_func = xs_attach_deferred;
1187 xs.xs_attachcb.ich_arg = NULL;
1188 config_intrhook_establish(&xs.xs_attachcb);
1189
1190 return (error);
1191 }
1192
1193 /**
1194 * Prepare for suspension of this VM by halting XenStore access after
1195 * all transactions and individual requests have completed.
1196 */
1197 static int
1198 xs_suspend(device_t dev __unused)
1199 {
1200
1201 sx_xlock(&xs.suspend_mutex);
1202 sx_xlock(&xs.request_mutex);
1203
1204 return (0);
1205 }
1206
1207 /**
1208 * Resume XenStore operations after this VM is resumed.
1209 */
1210 static int
1211 xs_resume(device_t dev __unused)
1212 {
1213 struct xs_watch *watch;
1214 char token[sizeof(watch) * 2 + 1];
1215
1216 xs_init_comms();
1217
1218 sx_xunlock(&xs.request_mutex);
1219
1220 /*
1221 * No need for registered_watches_lock: the suspend_mutex
1222 * is sufficient.
1223 */
1224 LIST_FOREACH(watch, &xs.registered_watches, list) {
1225 sprintf(token, "%lX", (long)watch);
1226 xs_watch(watch->node, token);
1227 }
1228
1229 sx_xunlock(&xs.suspend_mutex);
1230
1231 return (0);
1232 }
1233
1234 /*-------------------- Private Device Attachment Data -----------------------*/
1235 static device_method_t xenstore_methods[] = {
1236 /* Device interface */
1237 DEVMETHOD(device_identify, xs_identify),
1238 DEVMETHOD(device_probe, xs_probe),
1239 DEVMETHOD(device_attach, xs_attach),
1240 DEVMETHOD(device_detach, bus_generic_detach),
1241 DEVMETHOD(device_shutdown, bus_generic_shutdown),
1242 DEVMETHOD(device_suspend, xs_suspend),
1243 DEVMETHOD(device_resume, xs_resume),
1244
1245 /* Bus interface */
1246 DEVMETHOD(bus_add_child, bus_generic_add_child),
1247 DEVMETHOD(bus_print_child, bus_generic_print_child),
1248 DEVMETHOD(bus_alloc_resource, bus_generic_alloc_resource),
1249 DEVMETHOD(bus_release_resource, bus_generic_release_resource),
1250 DEVMETHOD(bus_activate_resource, bus_generic_activate_resource),
1251 DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource),
1252
1253 { 0, 0 }
1254 };
1255
1256 DEFINE_CLASS_0(xenstore, xenstore_driver, xenstore_methods, 0);
1257 static devclass_t xenstore_devclass;
1258
1259 #ifdef XENHVM
1260 DRIVER_MODULE(xenstore, xenpci, xenstore_driver, xenstore_devclass, 0, 0);
1261 #else
1262 DRIVER_MODULE(xenstore, nexus, xenstore_driver, xenstore_devclass, 0, 0);
1263 #endif
1264
1265 /*------------------------------- Sysctl Data --------------------------------*/
1266 /* XXX Shouldn't the node be somewhere else? */
1267 SYSCTL_NODE(_dev, OID_AUTO, xen, CTLFLAG_RD, NULL, "Xen");
1268 SYSCTL_INT(_dev_xen, OID_AUTO, xsd_port, CTLFLAG_RD, &xs.evtchn, 0, "");
1269 SYSCTL_ULONG(_dev_xen, OID_AUTO, xsd_kva, CTLFLAG_RD, (u_long *) &xen_store, 0, "");
1270
1271 /*-------------------------------- Public API --------------------------------*/
1272 /*------- API comments for these methods can be found in xenstorevar.h -------*/
1273 int
1274 xs_directory(struct xs_transaction t, const char *dir, const char *node,
1275 u_int *num, const char ***result)
1276 {
1277 struct sbuf *path;
1278 char *strings;
1279 u_int len = 0;
1280 int error;
1281
1282 path = xs_join(dir, node);
1283 error = xs_single(t, XS_DIRECTORY, sbuf_data(path), &len,
1284 (void **)&strings);
1285 sbuf_delete(path);
1286 if (error)
1287 return (error);
1288
1289 *result = split(strings, len, num);
1290
1291 return (0);
1292 }
1293
1294 int
1295 xs_exists(struct xs_transaction t, const char *dir, const char *node)
1296 {
1297 const char **d;
1298 int error, dir_n;
1299
1300 error = xs_directory(t, dir, node, &dir_n, &d);
1301 if (error)
1302 return (0);
1303 free(d, M_XENSTORE);
1304 return (1);
1305 }
1306
1307 int
1308 xs_read(struct xs_transaction t, const char *dir, const char *node,
1309 u_int *len, void **result)
1310 {
1311 struct sbuf *path;
1312 void *ret;
1313 int error;
1314
1315 path = xs_join(dir, node);
1316 error = xs_single(t, XS_READ, sbuf_data(path), len, &ret);
1317 sbuf_delete(path);
1318 if (error)
1319 return (error);
1320 *result = ret;
1321 return (0);
1322 }
1323
1324 int
1325 xs_write(struct xs_transaction t, const char *dir, const char *node,
1326 const char *string)
1327 {
1328 struct sbuf *path;
1329 struct iovec iovec[2];
1330 int error;
1331
1332 path = xs_join(dir, node);
1333
1334 iovec[0].iov_base = (void *)(uintptr_t) sbuf_data(path);
1335 iovec[0].iov_len = sbuf_len(path) + 1;
1336 iovec[1].iov_base = (void *)(uintptr_t) string;
1337 iovec[1].iov_len = strlen(string);
1338
1339 error = xs_talkv(t, XS_WRITE, iovec, 2, NULL, NULL);
1340 sbuf_delete(path);
1341
1342 return (error);
1343 }
1344
1345 int
1346 xs_mkdir(struct xs_transaction t, const char *dir, const char *node)
1347 {
1348 struct sbuf *path;
1349 int ret;
1350
1351 path = xs_join(dir, node);
1352 ret = xs_single(t, XS_MKDIR, sbuf_data(path), NULL, NULL);
1353 sbuf_delete(path);
1354
1355 return (ret);
1356 }
1357
1358 int
1359 xs_rm(struct xs_transaction t, const char *dir, const char *node)
1360 {
1361 struct sbuf *path;
1362 int ret;
1363
1364 path = xs_join(dir, node);
1365 ret = xs_single(t, XS_RM, sbuf_data(path), NULL, NULL);
1366 sbuf_delete(path);
1367
1368 return (ret);
1369 }
1370
/*
 * Recursively remove a subtree by iteratively descending into any
 * directory that cannot be removed because it is non-empty.  If the
 * caller did not supply a transaction, the whole removal runs in a
 * local transaction and is retried from scratch on EAGAIN.
 */
int
xs_rm_tree(struct xs_transaction xbt, const char *base, const char *node)
{
	struct xs_transaction local_xbt;
	struct sbuf *root_path_sbuf;
	struct sbuf *cur_path_sbuf;
	char *root_path;
	char *cur_path;
	const char **dir;
	int error;
	int empty;

retry:
	root_path_sbuf = xs_join(base, node);
	cur_path_sbuf  = xs_join(base, node);
	root_path      = sbuf_data(root_path_sbuf);
	cur_path       = sbuf_data(cur_path_sbuf);
	dir            = NULL;
	local_xbt.id   = 0;

	/* id == 0 means "no transaction supplied"; open our own. */
	if (xbt.id == 0) {
		error = xs_transaction_start(&local_xbt);
		if (error != 0)
			goto out;
		xbt = local_xbt;
	}

	empty = 0;
	while (1) {
		u_int count;
		u_int i;

		error = xs_directory(xbt, cur_path, "", &count, &dir);
		if (error)
			goto out;

		for (i = 0; i < count; i++) {
			error = xs_rm(xbt, cur_path, dir[i]);
			if (error == ENOTEMPTY) {
				struct sbuf *push_dir;

				/*
				 * Descend to clear out this sub directory.
				 * We'll return to the parent of push_dir
				 * once push_dir is empty.
				 */
				push_dir = xs_join(cur_path, dir[i]);
				sbuf_delete(cur_path_sbuf);
				cur_path_sbuf = push_dir;
				cur_path = sbuf_data(cur_path_sbuf);
				break;
			} else if (error != 0) {
				goto out;
			}
		}

		free(dir, M_XENSTORE);
		dir = NULL;

		/* i == count means no descent occurred this pass. */
		if (i == count) {
			char *last_slash;

			/* Directory is empty. It is now safe to remove. */
			error = xs_rm(xbt, cur_path, "");
			if (error != 0)
				goto out;

			if (!strcmp(cur_path, root_path))
				break;

			/* Return to processing the parent directory. */
			last_slash = strrchr(cur_path, '/');
			KASSERT(last_slash != NULL,
				("xs_rm_tree: mangled path %s", cur_path));
			*last_slash = '\0';
		}
	}

out:
	sbuf_delete(cur_path_sbuf);
	sbuf_delete(root_path_sbuf);
	if (dir != NULL)
		free(dir, M_XENSTORE);

	/* Commit (or abort, on error) a transaction we opened ourselves. */
	if (local_xbt.id != 0) {
		int terror;

		terror = xs_transaction_end(local_xbt, /*abort*/error != 0);
		xbt.id = 0;
		/* A conflicting update aborted us; start over. */
		if (terror == EAGAIN && error == 0)
			goto retry;
	}
	return (error);
}
1465
1466 int
1467 xs_transaction_start(struct xs_transaction *t)
1468 {
1469 char *id_str;
1470 int error;
1471
1472 error = xs_single(XST_NIL, XS_TRANSACTION_START, "", NULL,
1473 (void **)&id_str);
1474 if (error == 0) {
1475 t->id = strtoul(id_str, NULL, 0);
1476 free(id_str, M_XENSTORE);
1477 }
1478 return (error);
1479 }
1480
1481 int
1482 xs_transaction_end(struct xs_transaction t, int abort)
1483 {
1484 char abortstr[2];
1485
1486 if (abort)
1487 strcpy(abortstr, "F");
1488 else
1489 strcpy(abortstr, "T");
1490
1491 return (xs_single(t, XS_TRANSACTION_END, abortstr, NULL, NULL));
1492 }
1493
1494 int
1495 xs_scanf(struct xs_transaction t, const char *dir, const char *node,
1496 int *scancountp, const char *fmt, ...)
1497 {
1498 va_list ap;
1499 int error, ns;
1500 char *val;
1501
1502 error = xs_read(t, dir, node, NULL, (void **) &val);
1503 if (error)
1504 return (error);
1505
1506 va_start(ap, fmt);
1507 ns = vsscanf(val, fmt, ap);
1508 va_end(ap);
1509 free(val, M_XENSTORE);
1510 /* Distinctive errno. */
1511 if (ns == 0)
1512 return (ERANGE);
1513 if (scancountp)
1514 *scancountp = ns;
1515 return (0);
1516 }
1517
1518 int
1519 xs_vprintf(struct xs_transaction t,
1520 const char *dir, const char *node, const char *fmt, va_list ap)
1521 {
1522 struct sbuf *sb;
1523 int error;
1524
1525 sb = sbuf_new_auto();
1526 sbuf_vprintf(sb, fmt, ap);
1527 sbuf_finish(sb);
1528 error = xs_write(t, dir, node, sbuf_data(sb));
1529 sbuf_delete(sb);
1530
1531 return (error);
1532 }
1533
1534 int
1535 xs_printf(struct xs_transaction t, const char *dir, const char *node,
1536 const char *fmt, ...)
1537 {
1538 va_list ap;
1539 int error;
1540
1541 va_start(ap, fmt);
1542 error = xs_vprintf(t, dir, node, fmt, ap);
1543 va_end(ap);
1544
1545 return (error);
1546 }
1547
1548 int
1549 xs_gather(struct xs_transaction t, const char *dir, ...)
1550 {
1551 va_list ap;
1552 const char *name;
1553 int error;
1554
1555 va_start(ap, dir);
1556 error = 0;
1557 while (error == 0 && (name = va_arg(ap, char *)) != NULL) {
1558 const char *fmt = va_arg(ap, char *);
1559 void *result = va_arg(ap, void *);
1560 char *p;
1561
1562 error = xs_read(t, dir, name, NULL, (void **) &p);
1563 if (error)
1564 break;
1565
1566 if (fmt) {
1567 if (sscanf(p, fmt, result) == 0)
1568 error = EINVAL;
1569 free(p, M_XENSTORE);
1570 } else
1571 *(char **)result = p;
1572 }
1573 va_end(ap);
1574
1575 return (error);
1576 }
1577
/*
 * Register a watch with xenstored and link it onto the local list so
 * incoming watch events can be matched back to their handler.
 */
int
xs_register_watch(struct xs_watch *watch)
{
	/* Pointer in ascii is the token. */
	char token[sizeof(watch) * 2 + 1];
	int error;

	sprintf(token, "%lX", (long)watch);

	/* Shared-lock: exclude a concurrent suspend/resume replay. */
	sx_slock(&xs.suspend_mutex);

	/* Publish locally first so an immediate event can be matched. */
	mtx_lock(&xs.registered_watches_lock);
	KASSERT(find_watch(token) == NULL, ("watch already registered"));
	LIST_INSERT_HEAD(&xs.registered_watches, watch, list);
	mtx_unlock(&xs.registered_watches_lock);

	error = xs_watch(watch->node, token);

	/* Ignore errors due to multiple registration. */
	if (error == EEXIST)
		error = 0;

	/* Roll back the local registration if xenstored refused it. */
	if (error != 0) {
		mtx_lock(&xs.registered_watches_lock);
		LIST_REMOVE(watch, list);
		mtx_unlock(&xs.registered_watches_lock);
	}

	sx_sunlock(&xs.suspend_mutex);

	return (error);
}
1610
/*
 * Unregister a watch: remove it locally and from xenstored, discard
 * any of its queued-but-undelivered events, and wait out any callback
 * currently executing in the xenwatch thread.
 */
void
xs_unregister_watch(struct xs_watch *watch)
{
	struct xs_stored_msg *msg, *tmp;
	/* Token buffer: pointer rendered as hex, plus NUL. */
	char token[sizeof(watch) * 2 + 1];
	int error;

	sprintf(token, "%lX", (long)watch);

	/* Shared-lock: exclude a concurrent suspend/resume replay. */
	sx_slock(&xs.suspend_mutex);

	/* Bail quietly if the watch was never (or already un-) registered. */
	mtx_lock(&xs.registered_watches_lock);
	if (find_watch(token) == NULL) {
		mtx_unlock(&xs.registered_watches_lock);
		sx_sunlock(&xs.suspend_mutex);
		return;
	}
	LIST_REMOVE(watch, list);
	mtx_unlock(&xs.registered_watches_lock);

	error = xs_unwatch(watch->node, token);
	if (error)
		log(LOG_WARNING, "XENSTORE Failed to release watch %s: %i\n",
		    watch->node, error);

	sx_sunlock(&xs.suspend_mutex);

	/* Cancel pending watch events. */
	mtx_lock(&xs.watch_events_lock);
	TAILQ_FOREACH_SAFE(msg, &xs.watch_events, list, tmp) {
		if (msg->u.watch.handle != watch)
			continue;
		TAILQ_REMOVE(&xs.watch_events, msg, list);
		free(msg->u.watch.vec, M_XENSTORE);
		free(msg, M_XENSTORE);
	}
	mtx_unlock(&xs.watch_events_lock);

	/* Flush any currently-executing callback, unless we are it. :-) */
	if (curproc->p_pid != xs.xenwatch_pid) {
		sx_xlock(&xs.xenwatch_mutex);
		sx_xunlock(&xs.xenwatch_mutex);
	}
}
Cache object: df5287673974d629aa84fa9c24a8f70e
|