1 /******************************************************************************
2 * evtchn.c
3 *
4 * Xenolinux driver for receiving and demuxing event-channel signals.
5 *
6 * Copyright (c) 2004, K A Fraser
7 */
8
9 #include <sys/cdefs.h>
10 __FBSDID("$FreeBSD$");
11
12 #include <sys/param.h>
13 #include <sys/systm.h>
14 #include <sys/uio.h>
15 #include <sys/bus.h>
16 #include <sys/malloc.h>
17 #include <sys/kernel.h>
18 #include <sys/lock.h>
19 #include <sys/mutex.h>
20 #include <sys/selinfo.h>
21 #include <sys/poll.h>
22 #include <sys/conf.h>
23 #include <sys/fcntl.h>
24 #include <sys/ioccom.h>
25 #include <sys/rman.h>
26
27 #include <xen/xen-os.h>
28 #include <xen/evtchn.h>
29 #include <xen/xen_intr.h>
30
31 #include <machine/bus.h>
32 #include <machine/resource.h>
33 #include <machine/xen/synch_bitops.h>
34
35 #include <xen/evtchn/evtchnvar.h>
36
37 typedef struct evtchn_sotfc {
38
39 struct selinfo ev_rsel;
40 } evtchn_softc_t;
41
42 /* Only one process may open /dev/xen/evtchn at any time. */
43 static unsigned long evtchn_dev_inuse;
44
45 /* Notification ring, accessed via /dev/xen/evtchn. */
46
47 #define EVTCHN_RING_SIZE 2048 /* 2048 16-bit entries */
48
49 #define EVTCHN_RING_MASK(_i) ((_i)&(EVTCHN_RING_SIZE-1))
50 static uint16_t *ring;
51 static unsigned int ring_cons, ring_prod, ring_overflow;
52
53 /* Which ports is user-space bound to? */
54 static uint32_t bound_ports[32];
55
56 /* Unique address for processes to sleep on */
57 static void *evtchn_waddr = ˚
58
59 static struct mtx lock, upcall_lock;
60
61 static d_read_t evtchn_read;
62 static d_write_t evtchn_write;
63 static d_ioctl_t evtchn_ioctl;
64 static d_poll_t evtchn_poll;
65 static d_open_t evtchn_open;
66 static d_close_t evtchn_close;
67
68
/*
 * Deliver an event-channel notification to the user-space device.
 * Masks and acknowledges the port, then queues its number on the
 * notification ring for evtchn_read() to copy out.  Runs under
 * upcall_lock; silently drops the event if the device is not open
 * (ring == NULL) and records overflow if the ring is full.
 */
void
evtchn_device_upcall(evtchn_port_t port)
{
	mtx_lock(&upcall_lock);

	/* Quiesce the port until user space re-enables it via write(2). */
	evtchn_mask_port(port);
	evtchn_clear_port(port);

	if ( ring != NULL ) {
		if ( (ring_prod - ring_cons) < EVTCHN_RING_SIZE ) {
			ring[EVTCHN_RING_MASK(ring_prod)] = (uint16_t)port;
			/* Ring transitioned empty->non-empty: wake any sleeping reader. */
			if ( ring_cons == ring_prod++ ) {
				wakeup(evtchn_waddr);
			}
		}
		else {
			/* Ring full: remember it so read(2) can report EFBIG. */
			ring_overflow = 1;
		}
	}

	mtx_unlock(&upcall_lock);
}
91
92 static void
93 __evtchn_reset_buffer_ring(void)
94 {
95 /* Initialise the ring to empty. Clear errors. */
96 ring_cons = ring_prod = ring_overflow = 0;
97 }
98
99 static int
100 evtchn_read(struct cdev *dev, struct uio *uio, int ioflag)
101 {
102 int rc;
103 unsigned int count, c, p, sst = 0, bytes1 = 0, bytes2 = 0;
104 count = uio->uio_resid;
105
106 count &= ~1; /* even number of bytes */
107
108 if ( count == 0 )
109 {
110 rc = 0;
111 goto out;
112 }
113
114 if ( count > PAGE_SIZE )
115 count = PAGE_SIZE;
116
117 for ( ; ; ) {
118 if ( (c = ring_cons) != (p = ring_prod) )
119 break;
120
121 if ( ring_overflow ) {
122 rc = EFBIG;
123 goto out;
124 }
125
126 if (sst != 0) {
127 rc = EINTR;
128 goto out;
129 }
130
131 /* PCATCH == check for signals before and after sleeping
132 * PWAIT == priority of waiting on resource
133 */
134 sst = tsleep(evtchn_waddr, PWAIT|PCATCH, "evchwt", 10);
135 }
136
137 /* Byte lengths of two chunks. Chunk split (if any) is at ring wrap. */
138 if ( ((c ^ p) & EVTCHN_RING_SIZE) != 0 ) {
139 bytes1 = (EVTCHN_RING_SIZE - EVTCHN_RING_MASK(c)) * sizeof(uint16_t);
140 bytes2 = EVTCHN_RING_MASK(p) * sizeof(uint16_t);
141 }
142 else {
143 bytes1 = (p - c) * sizeof(uint16_t);
144 bytes2 = 0;
145 }
146
147 /* Truncate chunks according to caller's maximum byte count. */
148 if ( bytes1 > count ) {
149 bytes1 = count;
150 bytes2 = 0;
151 }
152 else if ( (bytes1 + bytes2) > count ) {
153 bytes2 = count - bytes1;
154 }
155
156 if ( uiomove(&ring[EVTCHN_RING_MASK(c)], bytes1, uio) ||
157 ((bytes2 != 0) && uiomove(&ring[0], bytes2, uio)))
158 /* keeping this around as its replacement is not equivalent
159 * copyout(&ring[0], &buf[bytes1], bytes2)
160 */
161 {
162 rc = EFAULT;
163 goto out;
164 }
165
166 ring_cons += (bytes1 + bytes2) / sizeof(uint16_t);
167
168 rc = bytes1 + bytes2;
169
170 out:
171
172 return rc;
173 }
174
175 static int
176 evtchn_write(struct cdev *dev, struct uio *uio, int ioflag)
177 {
178 int rc, i, count;
179
180 count = uio->uio_resid;
181
182 uint16_t *kbuf = (uint16_t *)malloc(PAGE_SIZE, M_DEVBUF, M_WAITOK);
183
184
185 if ( kbuf == NULL )
186 return ENOMEM;
187
188 count &= ~1; /* even number of bytes */
189
190 if ( count == 0 ) {
191 rc = 0;
192 goto out;
193 }
194
195 if ( count > PAGE_SIZE )
196 count = PAGE_SIZE;
197
198 if ( uiomove(kbuf, count, uio) != 0 ) {
199 rc = EFAULT;
200 goto out;
201 }
202
203 mtx_lock_spin(&lock);
204 for ( i = 0; i < (count/2); i++ )
205 if ( test_bit(kbuf[i], &bound_ports[0]) )
206 evtchn_unmask_port(kbuf[i]);
207 mtx_unlock_spin(&lock);
208
209 rc = count;
210
211 out:
212 free(kbuf, M_DEVBUF);
213 return rc;
214 }
215
/*
 * ioctl(2) handler.  The bind/unbind/reset commands are stubbed out
 * behind NOTYET; until enabled, every ioctl silently succeeds with 0.
 */
static int
evtchn_ioctl(struct cdev *dev, unsigned long cmd, caddr_t arg,
	     int mode, struct thread *td __unused)
{
	int rc = 0;

#ifdef NOTYET
	mtx_lock_spin(&lock);

	switch ( cmd )
	{
	case EVTCHN_RESET:
		/* Discard all queued notifications. */
		__evtchn_reset_buffer_ring();
		break;
	case EVTCHN_BIND:
		/* Port number is passed by value in "arg". */
		if ( !synch_test_and_set_bit((uintptr_t)arg, &bound_ports[0]) )
			unmask_evtchn((uintptr_t)arg);
		else
			rc = EINVAL;	/* already bound */
		break;
	case EVTCHN_UNBIND:
		if ( synch_test_and_clear_bit((uintptr_t)arg, &bound_ports[0]) )
			mask_evtchn((uintptr_t)arg);
		else
			rc = EINVAL;	/* was not bound */
		break;
	default:
		rc = ENOSYS;
		break;
	}

	mtx_unlock_spin(&lock);
#endif

	return rc;
}
252
253 static int
254 evtchn_poll(struct cdev *dev, int poll_events, struct thread *td)
255 {
256
257 evtchn_softc_t *sc;
258 unsigned int mask = POLLOUT | POLLWRNORM;
259
260 sc = dev->si_drv1;
261
262 if ( ring_cons != ring_prod )
263 mask |= POLLIN | POLLRDNORM;
264 else if ( ring_overflow )
265 mask = POLLERR;
266 else
267 selrecord(td, &sc->ev_rsel);
268
269
270 return mask;
271 }
272
273
/*
 * open(2) handler.  Enforces single-open via the evtchn_dev_inuse
 * bit, allocates the notification ring, and resets it.
 *
 * NOTE(review): rejecting O_NONBLOCK opens with EBUSY looks odd
 * (EINVAL or simply allowing it would be more conventional) --
 * confirm this is intentional before changing.
 */
static int
evtchn_open(struct cdev *dev, int flag, int otyp, struct thread *td)
{
	uint16_t *_ring;

	if (flag & O_NONBLOCK)
		return EBUSY;

	/* Atomically claim the device; fail if another process has it. */
	if ( synch_test_and_set_bit(0, &evtchn_dev_inuse) )
		return EBUSY;

	/* M_WAITOK cannot fail, so this NULL check is effectively dead. */
	if ( (_ring = (uint16_t *)malloc(PAGE_SIZE, M_DEVBUF, M_WAITOK)) == NULL )
		return ENOMEM;

	/* Publish the ring under the spin lock so the upcall path sees
	 * a consistent (ring, indices) pair. */
	mtx_lock_spin(&lock);
	ring = _ring;
	__evtchn_reset_buffer_ring();
	mtx_unlock_spin(&lock);


	return 0;
}
296
/*
 * close(2) handler: free the notification ring, mask every port the
 * process had bound, and release the single-open claim.
 *
 * NOTE(review): "ring" is freed and cleared BEFORE taking "lock",
 * so a concurrent evtchn_device_upcall() (which serializes on
 * upcall_lock, not lock) could observe a half-torn-down state --
 * verify the locking story here.
 */
static int
evtchn_close(struct cdev *dev, int flag, int otyp, struct thread *td __unused)
{
	int i;

	if (ring != NULL) {
		free(ring, M_DEVBUF);
		ring = NULL;
	}
	/* Mask everything user space had bound; assumes bound_ports[]
	 * has at least NR_EVENT_CHANNELS bits -- TODO confirm sizing. */
	mtx_lock_spin(&lock);
	for ( i = 0; i < NR_EVENT_CHANNELS; i++ )
		if ( synch_test_and_clear_bit(i, &bound_ports[0]) )
			evtchn_mask_port(i);
	mtx_unlock_spin(&lock);

	/* Allow the next open(2) to succeed. */
	evtchn_dev_inuse = 0;

	return 0;
}
316
/* Character-device switch table for /dev/xen/evtchn. */
static struct cdevsw evtchn_devsw = {
	.d_version =	D_VERSION,
	.d_open =	evtchn_open,
	.d_close =	evtchn_close,
	.d_read =	evtchn_read,
	.d_write =	evtchn_write,
	.d_ioctl =	evtchn_ioctl,
	.d_poll =	evtchn_poll,
	.d_name =	"evtchn",
};
327
328
/* XXX - if this device is ever supposed to support use by more than one process
 * this global static will have to go away
 */
static struct cdev *evtchn_dev;



/*
 * Driver initialization, run once at boot via SYSINIT: set up the two
 * mutexes, create /dev/xen/evtchn, and attach a zeroed softc to it.
 */
static int
evtchn_dev_init(void *dummy __unused)
{
	/* XXX I believe we don't need these leaving them here for now until we
	 * have some semblance of it working
	 */
	mtx_init(&upcall_lock, "evtchup", NULL, MTX_DEF);

	/* (DEVFS) create '/dev/misc/evtchn'. */
	evtchn_dev = make_dev(&evtchn_devsw, 0, UID_ROOT, GID_WHEEL, 0600, "xen/evtchn");

	/* Spin lock protecting the ring indices and bound_ports[]. */
	mtx_init(&lock, "evch", NULL, MTX_SPIN | MTX_NOWITNESS);

	/* Softc holds only the selinfo; zero it so selrecord starts clean. */
	evtchn_dev->si_drv1 = malloc(sizeof(evtchn_softc_t), M_DEVBUF, M_WAITOK);
	bzero(evtchn_dev->si_drv1, sizeof(evtchn_softc_t));

	if (bootverbose)
		printf("Event-channel device installed.\n");

	return 0;
}

SYSINIT(evtchn_dev_init, SI_SUB_DRIVERS, SI_ORDER_FIRST, evtchn_dev_init, NULL);
Cache object: 54468f8fa6ede423d3d238fc82ce82c0
|