FreeBSD/Linux Kernel Cross Reference
sys/coda/coda_psdev.c
1 /* $NetBSD: coda_psdev.c,v 1.34 2006/11/16 01:32:41 christos Exp $ */
2
3 /*
4 *
5 * Coda: an Experimental Distributed File System
6 * Release 3.1
7 *
8 * Copyright (c) 1987-1998 Carnegie Mellon University
9 * All Rights Reserved
10 *
11 * Permission to use, copy, modify and distribute this software and its
12 * documentation is hereby granted, provided that both the copyright
13 * notice and this permission notice appear in all copies of the
14 * software, derivative works or modified versions, and any portions
15 * thereof, and that both notices appear in supporting documentation, and
16 * that credit is given to Carnegie Mellon University in all documents
17 * and publicity pertaining to direct or indirect use of this code or its
18 * derivatives.
19 *
20 * CODA IS AN EXPERIMENTAL SOFTWARE SYSTEM AND IS KNOWN TO HAVE BUGS,
21 * SOME OF WHICH MAY HAVE SERIOUS CONSEQUENCES. CARNEGIE MELLON ALLOWS
22 * FREE USE OF THIS SOFTWARE IN ITS "AS IS" CONDITION. CARNEGIE MELLON
23 * DISCLAIMS ANY LIABILITY OF ANY KIND FOR ANY DAMAGES WHATSOEVER
24 * RESULTING DIRECTLY OR INDIRECTLY FROM THE USE OF THIS SOFTWARE OR OF
25 * ANY DERIVATIVE WORK.
26 *
27 * Carnegie Mellon encourages users of this software to return any
28 * improvements or extensions that they make, and to grant Carnegie
29 * Mellon the rights to redistribute these changes without encumbrance.
30 *
31 * @(#) coda/coda_psdev.c,v 1.1.1.1 1998/08/29 21:26:45 rvb Exp $
32 */
33
34 /*
35 * Mach Operating System
36 * Copyright (c) 1989 Carnegie-Mellon University
37 * All rights reserved. The CMU software License Agreement specifies
38 * the terms and conditions for use and redistribution.
39 */
40
41 /*
42 * This code was written for the Coda file system at Carnegie Mellon
43 * University. Contributors include David Steere, James Kistler, and
44 * M. Satyanarayanan. */
45
46 /* These routines define the pseudo device for communication between
47 * Coda's Venus and Minicache in Mach 2.6. They used to be in cfs_subr.c,
48 * but I moved them to make it easier to port the Minicache without
49 * porting coda. -- DCS 10/12/94
50 *
51 * Following code depends on file-system CODA.
52 */
53
54 /* These routines are the device entry points for Venus. */
55
56 #include <sys/cdefs.h>
57 __KERNEL_RCSID(0, "$NetBSD: coda_psdev.c,v 1.34 2006/11/16 01:32:41 christos Exp $");
58
59 extern int coda_nc_initialized; /* Set if cache has been initialized */
60
61 #ifdef _LKM
62 #define NVCODA 4
63 #else
64 #include <vcoda.h>
65 #endif
66
67 #include <sys/param.h>
68 #include <sys/systm.h>
69 #include <sys/kernel.h>
70 #include <sys/malloc.h>
71 #include <sys/proc.h>
72 #include <sys/mount.h>
73 #include <sys/file.h>
74 #include <sys/ioctl.h>
75 #include <sys/poll.h>
76 #include <sys/select.h>
77 #include <sys/conf.h>
78
79 #include <miscfs/syncfs/syncfs.h>
80
81 #include <coda/coda.h>
82 #include <coda/cnode.h>
83 #include <coda/coda_namecache.h>
84 #include <coda/coda_io.h>
85
86 #define CTL_C
87
int coda_psdev_print_entry = 0;	/* debug knob: nonzero traces entry into each routine (see ENTRY) */
static
int outstanding_upcalls = 0;	/* upcalls still in flight at device-close time; drained in vc_nb_close */
int coda_call_sleep = PZERO - 1;	/* tsleep(9) priority used while waiting on Venus */
#ifdef CTL_C
int coda_pcatch = PCATCH;	/* OR'd into the sleep priority so ^C/^Z can wake coda_call */
#else
#endif
96
97 #define ENTRY if(coda_psdev_print_entry) myprintf(("Entered %s\n",__func__))
98
99 void vcodaattach(int n);
100
101 dev_type_open(vc_nb_open);
102 dev_type_close(vc_nb_close);
103 dev_type_read(vc_nb_read);
104 dev_type_write(vc_nb_write);
105 dev_type_ioctl(vc_nb_ioctl);
106 dev_type_poll(vc_nb_poll);
107 dev_type_kqfilter(vc_nb_kqfilter);
108
/*
 * Character-device switch for the Venus communication pseudo-device.
 * stop/tty/mmap are unsupported; D_OTHER marks it neither block nor tty.
 */
const struct cdevsw vcoda_cdevsw = {
	vc_nb_open, vc_nb_close, vc_nb_read, vc_nb_write, vc_nb_ioctl,
	nostop, notty, vc_nb_poll, nommap, vc_nb_kqfilter, D_OTHER,
};
113
/*
 * One in-flight upcall message.  Messages live on vc_requests until Venus
 * reads them (vc_nb_read), then on vc_replys until Venus writes the answer
 * back (vc_nb_write), which wakes the sleeper in coda_call().
 */
struct vmsg {
	struct queue vm_chain;	/* linkage on vc_requests / vc_replys */
	caddr_t vm_data;	/* argument buffer shared with the caller */
	u_short vm_flags;	/* VM_READ / VM_WRITE / VM_INTR progress bits */
	u_short vm_inSize;	/* Size is at most 5000 bytes */
	u_short vm_outSize;	/* expected/actual reply size */
	u_short vm_opcode;	/* copied from data to save ptr lookup */
	int vm_unique;		/* sequence number matching request to reply */
	caddr_t vm_sleep;	/* Not used by Mach. */
};

#define	VM_READ	 1	/* Venus has read the request */
#define	VM_WRITE 2	/* Venus has written the reply */
#define	VM_INTR	 4	/* caller was interrupted */
128
/*
 * vcodaattach: autoconf attach hook for the coda pseudo-device.
 * Nothing to do here — all per-minor state is set up lazily in vc_nb_open().
 */
void
vcodaattach(int n)
{
}
134
135 /*
136 * These functions are written for NetBSD.
137 */
138 int
139 vc_nb_open(dev_t dev, int flag, int mode,
140 struct lwp *l)
141 {
142 struct vcomm *vcp;
143
144 ENTRY;
145
146 if (minor(dev) >= NVCODA || minor(dev) < 0)
147 return(ENXIO);
148
149 if (!coda_nc_initialized)
150 coda_nc_init();
151
152 vcp = &coda_mnttbl[minor(dev)].mi_vcomm;
153 if (VC_OPEN(vcp))
154 return(EBUSY);
155
156 memset(&(vcp->vc_selproc), 0, sizeof (struct selinfo));
157 INIT_QUEUE(vcp->vc_requests);
158 INIT_QUEUE(vcp->vc_replys);
159 MARK_VC_OPEN(vcp);
160
161 coda_mnttbl[minor(dev)].mi_vfsp = NULL;
162 coda_mnttbl[minor(dev)].mi_rootvp = NULL;
163
164 return(0);
165 }
166
/*
 * vc_nb_close: Venus has closed (or died on) the device.
 *
 * Wakes every caller blocked in coda_call(), waits for them to drain, then
 * forcibly unmounts the filesystem served through this minor so later
 * operations fail cleanly instead of hanging.  The ordering below (mark
 * unmounting -> wake sleepers -> wait -> dounmount) is deliberate.
 */
int
vc_nb_close(dev_t dev, int flag, int mode, struct lwp *l)
{
	struct vcomm *vcp;
	struct vmsg *vmp, *nvmp = NULL;
	struct coda_mntinfo *mi;
	int err;

	ENTRY;

	if (minor(dev) >= NVCODA || minor(dev) < 0)
		return(ENXIO);

	mi = &coda_mnttbl[minor(dev)];
	vcp = &(mi->mi_vcomm);

	if (!VC_OPEN(vcp))
		panic("vcclose: not open");

	/* prevent future operations on this vfs from succeeding by auto-
	 * unmounting any vfs mounted via this device. This frees user or
	 * sysadm from having to remember where all mount points are located.
	 * Put this before WAKEUPs to avoid queuing new messages between
	 * the WAKEUP and the unmount (which can happen if we're unlucky)
	 */
	if (!mi->mi_rootvp) {
		/* just a simple open/close w no mount */
		MARK_VC_CLOSED(vcp);
		return 0;
	}

	/* Let unmount know this is for real */
	/*
	 * XXX Freeze syncer. Must do this before locking the
	 * mount point. See dounmount for details().
	 */
	lockmgr(&syncer_lock, LK_EXCLUSIVE, NULL);
	VTOC(mi->mi_rootvp)->c_flags |= C_UNMOUNTING;
	if (vfs_busy(mi->mi_vfsp, 0, 0)) {
		lockmgr(&syncer_lock, LK_RELEASE, NULL);
		return (EBUSY);
	}
	coda_unmounting(mi->mi_vfsp);

	/* Wakeup clients so they can return. */
	for (vmp = (struct vmsg *)GETNEXT(vcp->vc_requests);
	     !EOQ(vmp, vcp->vc_requests);
	     vmp = nvmp)
	{
		/* grab the successor first: CODA_SIGNAL entries are freed here */
		nvmp = (struct vmsg *)GETNEXT(vmp->vm_chain);
		/* Free signal request messages and don't wakeup cause
		   no one is waiting. */
		if (vmp->vm_opcode == CODA_SIGNAL) {
			CODA_FREE((caddr_t)vmp->vm_data, (u_int)VC_IN_NO_DATA);
			CODA_FREE((caddr_t)vmp, (u_int)sizeof(struct vmsg));
			continue;
		}
		outstanding_upcalls++;
		wakeup(&vmp->vm_sleep);
	}

	for (vmp = (struct vmsg *)GETNEXT(vcp->vc_replys);
	     !EOQ(vmp, vcp->vc_replys);
	     vmp = (struct vmsg *)GETNEXT(vmp->vm_chain))
	{
		outstanding_upcalls++;
		wakeup(&vmp->vm_sleep);
	}

	MARK_VC_CLOSED(vcp);

	/* Wait for the woken sleepers in coda_call() to finish (the last
	 * one wakes us on &outstanding_upcalls) before tearing the mount
	 * down underneath them. */
	if (outstanding_upcalls) {
#ifdef	CODA_VERBOSE
		printf("presleep: outstanding_upcalls = %d\n", outstanding_upcalls);
		(void) tsleep(&outstanding_upcalls, coda_call_sleep, "coda_umount", 0);
		printf("postsleep: outstanding_upcalls = %d\n", outstanding_upcalls);
#else
		(void) tsleep(&outstanding_upcalls, coda_call_sleep, "coda_umount", 0);
#endif
	}

	err = dounmount(mi->mi_vfsp, flag, l);
	if (err)
		myprintf(("Error %d unmounting vfs in vcclose(%d)\n",
			err, minor(dev)));
	return 0;
}
254
/*
 * vc_nb_read: Venus reads the next pending upcall request.
 *
 * Copies the message at the head of the request queue out to userspace,
 * dequeues it, and moves it to the reply queue to await Venus's answer.
 * CODA_SIGNAL messages expect no reply and are freed here instead.
 * Returns 0 with no data when the queue is empty.
 */
int
vc_nb_read(dev_t dev, struct uio *uiop, int flag)
{
	struct vcomm * vcp;
	struct vmsg *vmp;
	int error = 0;

	ENTRY;

	if (minor(dev) >= NVCODA || minor(dev) < 0)
		return(ENXIO);

	vcp = &coda_mnttbl[minor(dev)].mi_vcomm;
	/* Get message at head of request queue. */
	if (EMPTY(vcp->vc_requests))
		return(0);	/* Nothing to read */

	vmp = (struct vmsg *)GETNEXT(vcp->vc_requests);

	/* Move the input args into userspace */
	uiop->uio_rw = UIO_READ;
	error = uiomove(vmp->vm_data, vmp->vm_inSize, uiop);
	if (error) {
		myprintf(("vcread: error (%d) on uiomove\n", error));
		error = EINVAL;
	}

#ifdef OLD_DIAGNOSTIC
	if (vmp->vm_chain.forw == 0 || vmp->vm_chain.back == 0)
		panic("vc_nb_read: bad chain");
#endif

	REMQUE(vmp->vm_chain);

	/* If request was a signal, free up the message and don't
	   enqueue it in the reply queue. */
	if (vmp->vm_opcode == CODA_SIGNAL) {
		if (codadebug)
			myprintf(("vcread: signal msg (%d, %d)\n",
				vmp->vm_opcode, vmp->vm_unique));
		CODA_FREE((caddr_t)vmp->vm_data, (u_int)VC_IN_NO_DATA);
		CODA_FREE((caddr_t)vmp, (u_int)sizeof(struct vmsg));
		return(error);
	}

	/* Mark it read and park it on the reply queue for vc_nb_write. */
	vmp->vm_flags |= VM_READ;
	INSQUE(vmp->vm_chain, vcp->vc_replys);

	return(error);
}
305
/*
 * vc_nb_write: Venus writes back either a downcall or an upcall reply.
 *
 * The first two ints are peeked to get (opcode, sequence).  Downcall
 * opcodes are handed whole to handleDownCall().  Otherwise the matching
 * message is looked up by sequence number on the reply queue, the payload
 * is copied into its buffer, and the sleeper in coda_call() is woken.
 */
int
vc_nb_write(dev_t dev, struct uio *uiop, int flag)
{
	struct vcomm * vcp;
	struct vmsg *vmp;
	struct coda_out_hdr *out;
	u_long seq;
	u_long opcode;
	int tbuf[2];
	int error = 0;

	ENTRY;

	if (minor(dev) >= NVCODA || minor(dev) < 0)
		return(ENXIO);

	vcp = &coda_mnttbl[minor(dev)].mi_vcomm;

	/* Peek at the opcode, unique without transferring the data. */
	uiop->uio_rw = UIO_WRITE;
	error = uiomove((caddr_t)tbuf, sizeof(int) * 2, uiop);
	if (error) {
		myprintf(("vcwrite: error (%d) on uiomove\n", error));
		return(EINVAL);
	}

	opcode = tbuf[0];
	seq = tbuf[1];

	if (codadebug)
		myprintf(("vcwrite got a call for %ld.%ld\n", opcode, seq));

	if (DOWNCALL(opcode)) {
		union outputArgs pbuf;

		/* get the rest of the data. */
		uiop->uio_rw = UIO_WRITE;
		error = uiomove((caddr_t)&pbuf.coda_purgeuser.oh.result, sizeof(pbuf) - (sizeof(int)*2), uiop);
		if (error) {
			myprintf(("vcwrite: error (%d) on uiomove (Op %ld seq %ld)\n",
				error, opcode, seq));
			return(EINVAL);
		}

		return handleDownCall(opcode, &pbuf);
	}

	/* Look for the message on the (waiting for) reply queue. */
	for (vmp = (struct vmsg *)GETNEXT(vcp->vc_replys);
	     !EOQ(vmp, vcp->vc_replys);
	     vmp = (struct vmsg *)GETNEXT(vmp->vm_chain))
	{
		if (vmp->vm_unique == seq) break;
	}

	if (EOQ(vmp, vcp->vc_replys)) {
		/* No waiter: the caller was likely interrupted and gave up. */
		if (codadebug)
			myprintf(("vcwrite: msg (%ld, %ld) not found\n", opcode, seq));

		return(ESRCH);
	}

	/* Remove the message from the reply queue */
	REMQUE(vmp->vm_chain);

	/* move data into response buffer. */
	out = (struct coda_out_hdr *)vmp->vm_data;
	/* Don't need to copy opcode and uniquifier. */

	/* get the rest of the data. */
	if (vmp->vm_outSize < uiop->uio_resid) {
		myprintf(("vcwrite: more data than asked for (%d < %lu)\n",
			vmp->vm_outSize, (unsigned long) uiop->uio_resid));
		wakeup(&vmp->vm_sleep); 	/* Notify caller of the error. */
		return(EINVAL);
	}

	tbuf[0] = uiop->uio_resid; 	/* Save this value. */
	uiop->uio_rw = UIO_WRITE;
	error = uiomove((caddr_t) &out->result, vmp->vm_outSize - (sizeof(int) * 2), uiop);
	if (error) {
		myprintf(("vcwrite: error (%d) on uiomove (op %ld seq %ld)\n",
			error, opcode, seq));
		return(EINVAL);
	}

	/* I don't think these are used, but just in case. */
	/* XXX - aren't these two already correct? -bnoble */
	out->opcode = opcode;
	out->unique = seq;
	/* NOTE(review): tbuf[0] here is the pre-copy uio_resid saved above,
	 * i.e. the total reply size Venus offered — confirm callers expect
	 * that rather than the bytes actually copied. */
	vmp->vm_outSize = tbuf[0];	/* Amount of data transferred? */
	vmp->vm_flags |= VM_WRITE;
	wakeup(&vmp->vm_sleep);

	return(0);
}
402
403 int
404 vc_nb_ioctl(dev_t dev, u_long cmd, caddr_t addr, int flag,
405 struct lwp *l)
406 {
407 ENTRY;
408
409 switch(cmd) {
410 case CODARESIZE: {
411 struct coda_resize *data = (struct coda_resize *)addr;
412 return(coda_nc_resize(data->hashsize, data->heapsize, IS_DOWNCALL));
413 break;
414 }
415 case CODASTATS:
416 if (coda_nc_use) {
417 coda_nc_gather_stats();
418 return(0);
419 } else {
420 return(ENODEV);
421 }
422 break;
423 case CODAPRINT:
424 if (coda_nc_use) {
425 print_coda_nc();
426 return(0);
427 } else {
428 return(ENODEV);
429 }
430 break;
431 case CIOC_KERNEL_VERSION:
432 switch (*(u_int *)addr) {
433 case 0:
434 *(u_int *)addr = coda_kernel_version;
435 return 0;
436 break;
437 case 1:
438 case 2:
439 if (coda_kernel_version != *(u_int *)addr)
440 return ENOENT;
441 else
442 return 0;
443 default:
444 return ENOENT;
445 }
446 break;
447 default :
448 return(EINVAL);
449 break;
450 }
451 }
452
453 int
454 vc_nb_poll(dev_t dev, int events, struct lwp *l)
455 {
456 struct vcomm *vcp;
457 int event_msk = 0;
458
459 ENTRY;
460
461 if (minor(dev) >= NVCODA || minor(dev) < 0)
462 return(ENXIO);
463
464 vcp = &coda_mnttbl[minor(dev)].mi_vcomm;
465
466 event_msk = events & (POLLIN|POLLRDNORM);
467 if (!event_msk)
468 return(0);
469
470 if (!EMPTY(vcp->vc_requests))
471 return(events & (POLLIN|POLLRDNORM));
472
473 selrecord(l, &(vcp->vc_selproc));
474
475 return(0);
476 }
477
478 static void
479 filt_vc_nb_detach(struct knote *kn)
480 {
481 struct vcomm *vcp = kn->kn_hook;
482
483 SLIST_REMOVE(&vcp->vc_selproc.sel_klist, kn, knote, kn_selnext);
484 }
485
486 static int
487 filt_vc_nb_read(struct knote *kn, long hint)
488 {
489 struct vcomm *vcp = kn->kn_hook;
490 struct vmsg *vmp;
491
492 if (EMPTY(vcp->vc_requests))
493 return (0);
494
495 vmp = (struct vmsg *)GETNEXT(vcp->vc_requests);
496
497 kn->kn_data = vmp->vm_inSize;
498 return (1);
499 }
500
/* kqueue read-filter ops: fd-backed (1), no attach hook needed here. */
static const struct filterops vc_nb_read_filtops =
	{ 1, NULL, filt_vc_nb_detach, filt_vc_nb_read };
503
/*
 * vc_nb_kqfilter: attach a kqueue filter to the Venus device.
 * Only EVFILT_READ is supported (readable == upcalls pending).
 */
int
vc_nb_kqfilter(dev_t dev, struct knote *kn)
{
	struct vcomm *vcp;
	struct klist *klist;

	ENTRY;

	if (minor(dev) >= NVCODA || minor(dev) < 0)
		return(ENXIO);

	vcp = &coda_mnttbl[minor(dev)].mi_vcomm;

	switch (kn->kn_filter) {
	case EVFILT_READ:
		klist = &vcp->vc_selproc.sel_klist;
		kn->kn_fop = &vc_nb_read_filtops;
		break;

	default:
		/* unsupported filter type */
		return (1);
	}

	kn->kn_hook = vcp;

	SLIST_INSERT_HEAD(klist, kn, kn_selnext);

	return (0);
}
533
/*
 * Statistics: total upcall count plus a per-opcode breakdown,
 * maintained by coda_call().
 */
struct coda_clstat coda_clstat;
538
539 /*
540 * Key question: whether to sleep interruptably or uninterruptably when
541 * waiting for Venus. The former seems better (cause you can ^C a
542 * job), but then GNU-EMACS completion breaks. Use tsleep with no
543 * timeout, and no longjmp happens. But, when sleeping
544 * "uninterruptibly", we don't get told if it returns abnormally
545 * (e.g. kill -9).
546 */
547
/*
 * coda_call: perform a synchronous upcall to Venus.
 *
 * Queues the request described by `buffer` (inSize bytes in, *outSize
 * expected out), pokes any poller, and sleeps until Venus replies, the
 * caller is interrupted, or Venus dies.  On success *outSize is updated
 * to the actual reply size and the reply's result code is returned.
 * Returns EINTR on interruption (after notifying Venus with a CODA_SIGNAL
 * message if Venus had already read the request) and ENODEV if the
 * channel is closed.
 */
int
coda_call(struct coda_mntinfo *mntinfo, int inSize, int *outSize,
	caddr_t buffer)
{
	struct vcomm *vcp;
	struct vmsg *vmp;
	int error;
#ifdef	CTL_C
	struct lwp *l = curlwp;
	struct proc *p = l->l_proc;
	sigset_t psig_omask;	/* pending-signal set saved for restore on exit */
	int i;
	psig_omask = l->l_proc->p_sigctx.ps_siglist; /* array assignment */
#endif
	if (mntinfo == NULL) {
	    /* Unlikely, but could be a race condition with a dying warden */
	    return ENODEV;
	}

	vcp = &(mntinfo->mi_vcomm);

	coda_clstat.ncalls++;
	coda_clstat.reqs[((struct coda_in_hdr *)buffer)->opcode]++;

	if (!VC_OPEN(vcp))
	    return(ENODEV);

	CODA_ALLOC(vmp,struct vmsg *,sizeof(struct vmsg));
	/* Format the request message. */
	vmp->vm_data = buffer;
	vmp->vm_flags = 0;
	vmp->vm_inSize = inSize;
	vmp->vm_outSize
	    = *outSize ? *outSize : inSize; /* |buffer| >= inSize */
	vmp->vm_opcode = ((struct coda_in_hdr *)buffer)->opcode;
	vmp->vm_unique = ++vcp->vc_seq;
	if (codadebug)
	    myprintf(("Doing a call for %d.%d\n",
		      vmp->vm_opcode, vmp->vm_unique));

	/* Fill in the common input args. */
	((struct coda_in_hdr *)buffer)->unique = vmp->vm_unique;

	/* Append msg to request queue and poke Venus. */
	INSQUE(vmp->vm_chain, vcp->vc_requests);
	selnotify(&(vcp->vc_selproc), 0);

	/* We can be interrupted while we wait for Venus to process
	 * our request. If the interrupt occurs before Venus has read
	 * the request, we dequeue and return. If it occurs after the
	 * read but before the reply, we dequeue, send a signal
	 * message, and return. If it occurs after the reply we ignore
	 * it. In no case do we want to restart the syscall. If it
	 * was interrupted by a venus shutdown (vcclose), return
	 * ENODEV. */

	/* Ignore return, We have to check anyway */
#ifdef	CTL_C
	/* This is work in progress. Setting coda_pcatch lets tsleep reawaken
	   on a ^c or ^z. The problem is that emacs sets certain interrupts
	   as SA_RESTART. This means that we should exit sleep handle the
	   "signal" and then go to sleep again. Mostly this is done by letting
	   the syscall complete and be restarted. We are not idempotent and
	   can not do this. A better solution is necessary.
	 */
	i = 0;
	do {
		/* NOTE(review): the timeout is a constant hz*2 each pass,
		 * but the verbose printf below reports "2+2*i" sec —
		 * message and code disagree; confirm intent. */
		error = tsleep(&vmp->vm_sleep, (coda_call_sleep|coda_pcatch), "coda_call", hz*2);
		if (error == 0)
			break;
		else if (error == EWOULDBLOCK) {
#ifdef	CODA_VERBOSE
			printf("coda_call: tsleep TIMEOUT %d sec\n", 2+2*i);
#endif
		} else if (sigismember(&p->p_sigctx.ps_siglist, SIGIO)) {
			/* mask SIGIO/SIGALRM and keep waiting; other signals
			 * fall through to the else-branch and abort the wait */
			sigaddset(&p->p_sigctx.ps_sigmask, SIGIO);
#ifdef	CODA_VERBOSE
			printf("coda_call: tsleep returns %d SIGIO, cnt %d\n", error, i);
#endif
		} else if (sigismember(&p->p_sigctx.ps_siglist, SIGALRM)) {
			sigaddset(&p->p_sigctx.ps_sigmask, SIGALRM);
#ifdef	CODA_VERBOSE
			printf("coda_call: tsleep returns %d SIGALRM, cnt %d\n", error, i);
#endif
		} else {
			sigset_t tmp;
			tmp = p->p_sigctx.ps_siglist; /* array assignment */
			sigminusset(&p->p_sigctx.ps_sigmask, &tmp);

#ifdef	CODA_VERBOSE
			printf("coda_call: tsleep returns %d, cnt %d\n", error, i);
			printf("coda_call: siglist = %x.%x.%x.%x, sigmask = %x.%x.%x.%x, mask %x.%x.%x.%x\n",
				p->p_sigctx.ps_siglist.__bits[0], p->p_sigctx.ps_siglist.__bits[1],
				p->p_sigctx.ps_siglist.__bits[2], p->p_sigctx.ps_siglist.__bits[3],
				p->p_sigctx.ps_sigmask.__bits[0], p->p_sigctx.ps_sigmask.__bits[1],
				p->p_sigctx.ps_sigmask.__bits[2], p->p_sigctx.ps_sigmask.__bits[3],
				tmp.__bits[0], tmp.__bits[1], tmp.__bits[2], tmp.__bits[3]);
#endif
			break;
#ifdef	notyet
			sigminusset(&p->p_sigctx.ps_sigmask, &p->p_sigctx.ps_siglist);
			printf("coda_call: siglist = %x.%x.%x.%x, sigmask = %x.%x.%x.%x\n",
				p->p_sigctx.ps_siglist.__bits[0], p->p_sigctx.ps_siglist.__bits[1],
				p->p_sigctx.ps_siglist.__bits[2], p->p_sigctx.ps_siglist.__bits[3],
				p->p_sigctx.ps_sigmask.__bits[0], p->p_sigctx.ps_sigmask.__bits[1],
				p->p_sigctx.ps_sigmask.__bits[2], p->p_sigctx.ps_sigmask.__bits[3]);
#endif
		}
	} while (error && i++ < 128 && VC_OPEN(vcp));
	/* restore the pending-signal set we may have fiddled with above */
	p->p_sigctx.ps_siglist = psig_omask; /* array assignment */
#else
	(void) tsleep(&vmp->vm_sleep, coda_call_sleep, "coda_call", 0);
#endif
	if (VC_OPEN(vcp)) {	/* Venus is still alive */
	/* Op went through, interrupt or not... */
	    if (vmp->vm_flags & VM_WRITE) {
		/* Reply arrived (vc_nb_write set VM_WRITE). */
		error = 0;
		*outSize = vmp->vm_outSize;
	    }

	    else if (!(vmp->vm_flags & VM_READ)) {
		/* Interrupted before venus read it. */
#ifdef	CODA_VERBOSE
		if (1)
#else
		if (codadebug)
#endif
		    myprintf(("interrupted before read: op = %d.%d, flags = %x\n",
			   vmp->vm_opcode, vmp->vm_unique, vmp->vm_flags));
		REMQUE(vmp->vm_chain);
		error = EINTR;
	    }

	    else {
		/* (!(vmp->vm_flags & VM_WRITE)) means interrupted after
		   upcall started */
		/* Interrupted after start of upcall, send venus a signal */
		struct coda_in_hdr *dog;
		struct vmsg *svmp;

#ifdef	CODA_VERBOSE
		if (1)
#else
		if (codadebug)
#endif
		    myprintf(("Sending Venus a signal: op = %d.%d, flags = %x\n",
			   vmp->vm_opcode, vmp->vm_unique, vmp->vm_flags));

		REMQUE(vmp->vm_chain);
		error = EINTR;

		CODA_ALLOC(svmp, struct vmsg *, sizeof (struct vmsg));

		CODA_ALLOC((svmp->vm_data), char *, sizeof (struct coda_in_hdr));
		dog = (struct coda_in_hdr *)svmp->vm_data;

		svmp->vm_flags = 0;
		dog->opcode = svmp->vm_opcode = CODA_SIGNAL;
		dog->unique = svmp->vm_unique = vmp->vm_unique;
		svmp->vm_inSize = sizeof (struct coda_in_hdr);
/*??? rvb */	svmp->vm_outSize = sizeof (struct coda_in_hdr);

		if (codadebug)
		    myprintf(("coda_call: enqueing signal msg (%d, %d)\n",
			   svmp->vm_opcode, svmp->vm_unique));

		/* insert at head of queue! */
		INSQUE(svmp->vm_chain, vcp->vc_requests);
		selnotify(&(vcp->vc_selproc), 0);
	    }
	}

	else {	/* If venus died (!VC_OPEN(vcp)) */
	    if (codadebug)
		myprintf(("vcclose woke op %d.%d flags %d\n",
		       vmp->vm_opcode, vmp->vm_unique, vmp->vm_flags));

		error = ENODEV;
	}

	CODA_FREE(vmp, sizeof(struct vmsg));

	/* Last drained upcall wakes vc_nb_close waiting on unmount. */
	if (outstanding_upcalls > 0 && (--outstanding_upcalls == 0))
		wakeup(&outstanding_upcalls);

	if (!error)
		error = ((struct coda_out_hdr *)buffer)->result;
	return(error);
}
737
Cache object: e5fddcb9e6f8f75dcb31865caf50c23a
|