/******************************************************************************
 * evtchn.c
 *
 * Driver for receiving and demuxing event-channel signals.
 *
 * Copyright (c) 2004-2005, K A Fraser
 * Multi-process extensions Copyright (c) 2004, Steven Smith
 * FreeBSD port Copyright (c) 2014, Roger Pau Monné
 * Fetched from git://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git
 * File: drivers/xen/evtchn.c
 * Git commit: 0dc0064add422bc0ef5165ebe9ece3052bbd457d
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/uio.h>
#include <sys/bus.h>
#include <sys/malloc.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/sx.h>
#include <sys/selinfo.h>
#include <sys/poll.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/ioccom.h>
#include <sys/rman.h>
#include <sys/tree.h>
#include <sys/module.h>
#include <sys/filio.h>
#include <sys/vnode.h>

#include <machine/xen/synch_bitops.h>

#include <xen/xen-os.h>
#include <xen/evtchn.h>
#include <xen/xen_intr.h>

#include <xen/evtchn/evtchnvar.h>

MALLOC_DEFINE(M_EVTCHN, "evtchn_dev", "Xen event channel user-space device");

struct user_evtchn;

static int evtchn_cmp(struct user_evtchn *u1, struct user_evtchn *u2);

RB_HEAD(evtchn_tree, user_evtchn);

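/*
 * Per-open-instance state, attached to the file handle with
 * devfs_set_cdevpriv().  Lock usage: bind_mutex protects the tree of bound
 * channels, ring_cons_mutex (an sx lock, so consumers may sleep) serializes
 * readers, and ring_prod_mutex serializes producers running from interrupt
 * context.
 */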
struct per_user_data {
	struct mtx bind_mutex; /* serialize bind/unbind operations */
	struct evtchn_tree evtchns;

	/* Notification ring, accessed via /dev/xen/evtchn. */
#define EVTCHN_RING_SIZE (PAGE_SIZE / sizeof(evtchn_port_t))
#define EVTCHN_RING_MASK(_i) ((_i) & (EVTCHN_RING_SIZE - 1))
	evtchn_port_t *ring;
	unsigned int ring_cons, ring_prod, ring_overflow;
	struct sx ring_cons_mutex; /* protect against concurrent readers */
	struct mtx ring_prod_mutex; /* protect against concurrent interrupts */
	struct selinfo ev_rsel;
};

struct user_evtchn {
	RB_ENTRY(user_evtchn) node;
	struct per_user_data *user;
	evtchn_port_t port;
	xen_intr_handle_t handle;
	bool enabled;
};

RB_GENERATE_STATIC(evtchn_tree, user_evtchn, node, evtchn_cmp);

static device_t evtchn_dev;

static d_read_t evtchn_read;
static d_write_t evtchn_write;
static d_ioctl_t evtchn_ioctl;
static d_poll_t evtchn_poll;
static d_open_t evtchn_open;

static void evtchn_release(void *arg);

static struct cdevsw evtchn_devsw = {
	.d_version = D_VERSION,
	.d_open = evtchn_open,
	.d_read = evtchn_read,
	.d_write = evtchn_write,
	.d_ioctl = evtchn_ioctl,
	.d_poll = evtchn_poll,
	.d_name = "evtchn",
};
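
/*
 * A minimal sketch of the user-space protocol this device implements
 * (illustrative only; error checking is omitted, and the ioctl argument
 * structures are those declared in xen/evtchn.h):
 *
 *	int fd = open("/dev/xen/evtchn", O_RDWR);
 *	struct ioctl_evtchn_bind_unbound_port bind = { .remote_domain = 0 };
 *	ioctl(fd, IOCTL_EVTCHN_BIND_UNBOUND_PORT, &bind);
 *	evtchn_port_t port;
 *	read(fd, &port, sizeof(port));	-- blocks until an event arrives
 *	write(fd, &port, sizeof(port));	-- re-enable (unmask) the port
 */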

/*------------------------- Red-black tree helpers ---------------------------*/
static int
evtchn_cmp(struct user_evtchn *u1, struct user_evtchn *u2)
{

	return (u1->port - u2->port);
}

static struct user_evtchn *
find_evtchn(struct per_user_data *u, evtchn_port_t port)
{
	struct user_evtchn tmp = {
		.port = port,
	};

	return (RB_FIND(evtchn_tree, &u->evtchns, &tmp));
}

/*--------------------------- Interrupt handlers -----------------------------*/
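/*
 * Interrupt filter, run in primary interrupt context.  Mask the port so no
 * further upcalls are delivered until user space re-enables it with a
 * write(2), then hand off to evtchn_interrupt() on the ithread.
 */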
static int
evtchn_filter(void *arg)
{
	struct user_evtchn *evtchn;

	evtchn = arg;

	if (!evtchn->enabled && bootverbose) {
		device_printf(evtchn_dev,
		    "Received upcall for disabled event channel %d\n",
		    evtchn->port);
	}

	evtchn_mask_port(evtchn->port);
	evtchn->enabled = false;

	return (FILTER_SCHEDULE_THREAD);
}
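
/*
 * Ithread handler: enqueue the port on the notification ring.  Readers and
 * pollers are only woken on the empty-to-non-empty transition; if the ring
 * is full the event is dropped and the overflow flag stays latched until an
 * IOCTL_EVTCHN_RESET.
 */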
static void
evtchn_interrupt(void *arg)
{
	struct user_evtchn *evtchn;
	struct per_user_data *u;

	evtchn = arg;
	u = evtchn->user;

	/*
	 * Protect against concurrent events using this handler
	 * on different CPUs.
	 */
	mtx_lock(&u->ring_prod_mutex);
	if ((u->ring_prod - u->ring_cons) < EVTCHN_RING_SIZE) {
		u->ring[EVTCHN_RING_MASK(u->ring_prod)] = evtchn->port;
		wmb(); /* Ensure ring contents visible */
		if (u->ring_cons == u->ring_prod++) {
			wakeup(u);
			selwakeup(&u->ev_rsel);
		}
	} else
		u->ring_overflow = 1;
	mtx_unlock(&u->ring_prod_mutex);
}

/*------------------------- Character device methods -------------------------*/
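/*
 * Allocate the per-open state and the one-page notification ring, and hang
 * them off the devfs file handle; evtchn_release() tears everything down
 * when the file descriptor is closed.
 */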
static int
evtchn_open(struct cdev *dev, int flag, int otyp, struct thread *td)
{
	struct per_user_data *u;
	int error;

	u = malloc(sizeof(*u), M_EVTCHN, M_WAITOK | M_ZERO);
	u->ring = malloc(PAGE_SIZE, M_EVTCHN, M_WAITOK | M_ZERO);

	/* Initialize locks. */
	mtx_init(&u->bind_mutex, "evtchn_bind_mutex", NULL, MTX_DEF);
	sx_init(&u->ring_cons_mutex, "evtchn_ringc_sx");
	mtx_init(&u->ring_prod_mutex, "evtchn_ringp_mutex", NULL, MTX_DEF);

	/* Initialize red-black tree. */
	RB_INIT(&u->evtchns);

	/* Assign the allocated per_user_data to this open instance. */
	error = devfs_set_cdevpriv(u, evtchn_release);
	if (error != 0) {
		mtx_destroy(&u->bind_mutex);
		mtx_destroy(&u->ring_prod_mutex);
		sx_destroy(&u->ring_cons_mutex);
		free(u->ring, M_EVTCHN);
		free(u, M_EVTCHN);
	}

	return (error);
}

static void
evtchn_release(void *arg)
{
	struct per_user_data *u;
	struct user_evtchn *evtchn, *tmp;

	u = arg;

	seldrain(&u->ev_rsel);

	RB_FOREACH_SAFE(evtchn, evtchn_tree, &u->evtchns, tmp) {
		xen_intr_unbind(&evtchn->handle);

		RB_REMOVE(evtchn_tree, &u->evtchns, evtchn);
		free(evtchn, M_EVTCHN);
	}

	mtx_destroy(&u->bind_mutex);
	mtx_destroy(&u->ring_prod_mutex);
	sx_destroy(&u->ring_cons_mutex);
	free(u->ring, M_EVTCHN);
	free(u, M_EVTCHN);
}
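
/*
 * read(2): return pending port numbers as an array of evtchn_port_t.  Only
 * whole ports are returned; the call blocks on an empty ring unless
 * IO_NDELAY is set, and fails with EFBIG once the ring has overflowed
 * (until cleared with IOCTL_EVTCHN_RESET).
 */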
static int
evtchn_read(struct cdev *dev, struct uio *uio, int ioflag)
{
	int error, count;
	unsigned int c, p, bytes1 = 0, bytes2 = 0;
	struct per_user_data *u;

	error = devfs_get_cdevpriv((void **)&u);
	if (error != 0)
		return (EINVAL);

	/* Whole number of ports. */
	count = uio->uio_resid;
	count &= ~(sizeof(evtchn_port_t) - 1);

	if (count == 0)
		return (0);

	if (count > PAGE_SIZE)
		count = PAGE_SIZE;

	sx_xlock(&u->ring_cons_mutex);
	for (;;) {
		if (u->ring_overflow) {
			error = EFBIG;
			goto unlock_out;
		}

		c = u->ring_cons;
		p = u->ring_prod;
		if (c != p)
			break;

		if (ioflag & IO_NDELAY) {
			error = EWOULDBLOCK;
			goto unlock_out;
		}

		error = sx_sleep(u, &u->ring_cons_mutex, PCATCH, "evtchw", 0);
		if ((error != 0) && (error != EWOULDBLOCK))
			goto unlock_out;
	}

	/* Byte lengths of two chunks.  Chunk split (if any) is at ring wrap. */
	if (((c ^ p) & EVTCHN_RING_SIZE) != 0) {
		bytes1 = (EVTCHN_RING_SIZE - EVTCHN_RING_MASK(c)) *
		    sizeof(evtchn_port_t);
		bytes2 = EVTCHN_RING_MASK(p) * sizeof(evtchn_port_t);
	} else {
		bytes1 = (p - c) * sizeof(evtchn_port_t);
		bytes2 = 0;
	}

	/* Truncate chunks according to caller's maximum byte count. */
	if (bytes1 > count) {
		bytes1 = count;
		bytes2 = 0;
	} else if ((bytes1 + bytes2) > count) {
		bytes2 = count - bytes1;
	}

	error = EFAULT;
	rmb(); /* Ensure that we see the port before we copy it. */

	if (uiomove(&u->ring[EVTCHN_RING_MASK(c)], bytes1, uio) ||
	    ((bytes2 != 0) && uiomove(&u->ring[0], bytes2, uio)))
		goto unlock_out;

	u->ring_cons += (bytes1 + bytes2) / sizeof(evtchn_port_t);
	error = 0;

unlock_out:
	sx_xunlock(&u->ring_cons_mutex);
	return (error);
}
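
/*
 * write(2): take an array of evtchn_port_t from user space and re-enable
 * (unmask) each port that is bound to this open instance.  This is how a
 * consumer acknowledges events it has read, since evtchn_filter() masks
 * and disables a port as soon as it fires.
 */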
static int
evtchn_write(struct cdev *dev, struct uio *uio, int ioflag)
{
	int error, i, count;
	evtchn_port_t *kbuf;
	struct per_user_data *u;

	error = devfs_get_cdevpriv((void **)&u);
	if (error != 0)
		return (EINVAL);

	kbuf = malloc(PAGE_SIZE, M_EVTCHN, M_WAITOK);

	count = uio->uio_resid;
	/* Whole number of ports. */
	count &= ~(sizeof(evtchn_port_t) - 1);

	error = 0;
	if (count == 0)
		goto out;

	if (count > PAGE_SIZE)
		count = PAGE_SIZE;

	error = uiomove(kbuf, count, uio);
	if (error != 0)
		goto out;

	mtx_lock(&u->bind_mutex);

	for (i = 0; i < (count / sizeof(evtchn_port_t)); i++) {
		evtchn_port_t port = kbuf[i];
		struct user_evtchn *evtchn;

		evtchn = find_evtchn(u, port);
		if (evtchn != NULL && !evtchn->enabled) {
			evtchn->enabled = true;
			evtchn_unmask_port(evtchn->port);
		}
	}

	mtx_unlock(&u->bind_mutex);
	error = 0;

out:
	free(kbuf, M_EVTCHN);
	return (error);
}
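
/*
 * Common tail for the three bind ioctls: record the port, insert the
 * channel in the per-user tree, and attach the filter/ithread handlers.
 * On failure the channel is unbound and freed here, so callers must not
 * touch it again.
 */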
static inline int
evtchn_bind_user_port(struct per_user_data *u, struct user_evtchn *evtchn)
{
	int error;

	evtchn->port = xen_intr_port(evtchn->handle);
	evtchn->user = u;
	evtchn->enabled = true;
	mtx_lock(&u->bind_mutex);
	RB_INSERT(evtchn_tree, &u->evtchns, evtchn);
	mtx_unlock(&u->bind_mutex);
	error = xen_intr_add_handler(device_get_nameunit(evtchn_dev),
	    evtchn_filter, evtchn_interrupt, evtchn,
	    INTR_TYPE_MISC | INTR_MPSAFE, evtchn->handle);
	if (error != 0) {
		xen_intr_unbind(&evtchn->handle);
		mtx_lock(&u->bind_mutex);
		RB_REMOVE(evtchn_tree, &u->evtchns, evtchn);
		mtx_unlock(&u->bind_mutex);
		free(evtchn, M_EVTCHN);
	}
	return (error);
}
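
/*
 * ioctl(2) interface: bind a VIRQ, an interdomain channel, or a freshly
 * allocated unbound port to this file handle; unbind or notify an existing
 * channel; or reset the notification ring.
 */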
static int
evtchn_ioctl(struct cdev *dev, unsigned long cmd, caddr_t arg,
    int mode, struct thread *td __unused)
{
	struct per_user_data *u;
	int error;

	error = devfs_get_cdevpriv((void **)&u);
	if (error != 0)
		return (EINVAL);

	switch (cmd) {
	case IOCTL_EVTCHN_BIND_VIRQ: {
		struct ioctl_evtchn_bind_virq *bind;
		struct user_evtchn *evtchn;

		evtchn = malloc(sizeof(*evtchn), M_EVTCHN, M_WAITOK | M_ZERO);

		bind = (struct ioctl_evtchn_bind_virq *)arg;

		error = xen_intr_bind_virq(evtchn_dev, bind->virq, 0,
		    NULL, NULL, NULL, 0, &evtchn->handle);
		if (error != 0) {
			free(evtchn, M_EVTCHN);
			break;
		}
		error = evtchn_bind_user_port(u, evtchn);
		if (error != 0)
			break;
		bind->port = evtchn->port;
		break;
	}

	case IOCTL_EVTCHN_BIND_INTERDOMAIN: {
		struct ioctl_evtchn_bind_interdomain *bind;
		struct user_evtchn *evtchn;

		evtchn = malloc(sizeof(*evtchn), M_EVTCHN, M_WAITOK | M_ZERO);

		bind = (struct ioctl_evtchn_bind_interdomain *)arg;

		error = xen_intr_bind_remote_port(evtchn_dev,
		    bind->remote_domain, bind->remote_port, NULL,
		    NULL, NULL, 0, &evtchn->handle);
		if (error != 0) {
			free(evtchn, M_EVTCHN);
			break;
		}
		error = evtchn_bind_user_port(u, evtchn);
		if (error != 0)
			break;
		bind->port = evtchn->port;
		break;
	}

	case IOCTL_EVTCHN_BIND_UNBOUND_PORT: {
		struct ioctl_evtchn_bind_unbound_port *bind;
		struct user_evtchn *evtchn;

		evtchn = malloc(sizeof(*evtchn), M_EVTCHN, M_WAITOK | M_ZERO);

		bind = (struct ioctl_evtchn_bind_unbound_port *)arg;

		error = xen_intr_alloc_and_bind_local_port(evtchn_dev,
		    bind->remote_domain, NULL, NULL, NULL, 0, &evtchn->handle);
		if (error != 0) {
			free(evtchn, M_EVTCHN);
			break;
		}
		error = evtchn_bind_user_port(u, evtchn);
		if (error != 0)
			break;
		bind->port = evtchn->port;
		break;
	}

	case IOCTL_EVTCHN_UNBIND: {
		struct ioctl_evtchn_unbind *unbind;
		struct user_evtchn *evtchn;

		unbind = (struct ioctl_evtchn_unbind *)arg;

		mtx_lock(&u->bind_mutex);
		evtchn = find_evtchn(u, unbind->port);
		if (evtchn == NULL) {
			mtx_unlock(&u->bind_mutex);
			error = ENOTCONN;
			break;
		}
		RB_REMOVE(evtchn_tree, &u->evtchns, evtchn);
		mtx_unlock(&u->bind_mutex);

		xen_intr_unbind(&evtchn->handle);
		free(evtchn, M_EVTCHN);
		error = 0;
		break;
	}

	case IOCTL_EVTCHN_NOTIFY: {
		struct ioctl_evtchn_notify *notify;
		struct user_evtchn *evtchn;

		notify = (struct ioctl_evtchn_notify *)arg;

		mtx_lock(&u->bind_mutex);
		evtchn = find_evtchn(u, notify->port);
		if (evtchn == NULL) {
			mtx_unlock(&u->bind_mutex);
			error = ENOTCONN;
			break;
		}

		xen_intr_signal(evtchn->handle);
		mtx_unlock(&u->bind_mutex);
		error = 0;
		break;
	}

	case IOCTL_EVTCHN_RESET: {
		/* Initialize the ring to empty.  Clear errors. */
		sx_xlock(&u->ring_cons_mutex);
		mtx_lock(&u->ring_prod_mutex);
		u->ring_cons = u->ring_prod = u->ring_overflow = 0;
		mtx_unlock(&u->ring_prod_mutex);
		sx_xunlock(&u->ring_cons_mutex);
		error = 0;
		break;
	}

	case FIONBIO:
	case FIOASYNC:
		/* Handled in an upper layer. */
		error = 0;
		break;

	default:
		error = ENOTTY;
		break;
	}

	return (error);
}
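
/*
 * poll(2): the device is always writable; it is readable whenever the
 * notification ring is non-empty, otherwise register with selrecord() so
 * evtchn_interrupt() can wake the poller via selwakeup().
 */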
static int
evtchn_poll(struct cdev *dev, int events, struct thread *td)
{
	struct per_user_data *u;
	int error, mask;

	error = devfs_get_cdevpriv((void **)&u);
	if (error != 0)
		return (POLLERR);

	/* We can always write. */
	mask = events & (POLLOUT | POLLWRNORM);

	mtx_lock(&u->ring_prod_mutex);
	if (events & (POLLIN | POLLRDNORM)) {
		if (u->ring_cons != u->ring_prod) {
			mask |= events & (POLLIN | POLLRDNORM);
		} else {
			/* Record that someone is waiting. */
			selrecord(td, &u->ev_rsel);
		}
	}
	mtx_unlock(&u->ring_prod_mutex);

	return (mask);
}

/*------------------ Private Device Attachment Functions --------------------*/
static void
evtchn_identify(driver_t *driver, device_t parent)
{

	KASSERT((xen_domain()),
	    ("Trying to attach evtchn device on non Xen domain"));

	evtchn_dev = BUS_ADD_CHILD(parent, 0, "evtchn", 0);
	if (evtchn_dev == NULL)
		panic("unable to attach evtchn user-space device");
}

static int
evtchn_probe(device_t dev)
{

	device_set_desc(dev, "Xen event channel user-space device");
	return (BUS_PROBE_NOWILDCARD);
}

static int
evtchn_attach(device_t dev)
{

	make_dev_credf(MAKEDEV_ETERNAL, &evtchn_devsw, 0, NULL, UID_ROOT,
	    GID_WHEEL, 0600, "xen/evtchn");
	return (0);
}

/*-------------------- Private Device Attachment Data -----------------------*/
static device_method_t evtchn_methods[] = {
	DEVMETHOD(device_identify, evtchn_identify),
	DEVMETHOD(device_probe, evtchn_probe),
	DEVMETHOD(device_attach, evtchn_attach),

	DEVMETHOD_END
};

static driver_t evtchn_driver = {
	"evtchn",
	evtchn_methods,
	0,
};

DRIVER_MODULE(evtchn, xenpv, evtchn_driver, 0, 0);
MODULE_DEPEND(evtchn, xenpv, 1, 1, 1);