1 /*-
2 * Copyright (c) 2005 David Xu <davidxu@freebsd.org>
3 * All rights reserved.
4 *
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
7 * are met:
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
13 *
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
24 * SUCH DAMAGE.
25 *
26 */
27
28 /*
29 * POSIX message queue implementation.
30 *
31 * 1) A mqueue filesystem can be mounted, each message queue appears
32 * in mounted directory, user can change queue's permission and
33 * ownership, or remove a queue. Manually creating a file in the
34 * directory causes a message queue to be created in the kernel with
35 * default message queue attributes applied and same name used, this
36 * method is not advocated since mq_open syscall allows user to specify
37 * different attributes. Also the file system can be mounted multiple
38 * times at different mount points but shows same contents.
39 *
40 * 2) Standard POSIX message queue API. The syscalls do not use vfs layer,
41 * but directly operate on internal data structure, this allows user to
42 * use the IPC facility without having to mount mqueue file system.
43 */
44
45 #include <sys/cdefs.h>
46 __FBSDID("$FreeBSD: releng/7.4/sys/kern/uipc_mqueue.c 202814 2010-01-22 17:02:07Z jhb $");
47
48 #include <sys/param.h>
49 #include <sys/kernel.h>
50 #include <sys/systm.h>
51 #include <sys/limits.h>
52 #include <sys/buf.h>
53 #include <sys/dirent.h>
54 #include <sys/event.h>
55 #include <sys/eventhandler.h>
56 #include <sys/fcntl.h>
57 #include <sys/file.h>
58 #include <sys/filedesc.h>
59 #include <sys/lock.h>
60 #include <sys/malloc.h>
61 #include <sys/module.h>
62 #include <sys/mount.h>
63 #include <sys/mqueue.h>
64 #include <sys/mutex.h>
65 #include <sys/namei.h>
66 #include <sys/posix4.h>
67 #include <sys/poll.h>
68 #include <sys/priv.h>
69 #include <sys/proc.h>
70 #include <sys/queue.h>
71 #include <sys/sysproto.h>
72 #include <sys/stat.h>
73 #include <sys/syscall.h>
74 #include <sys/syscallsubr.h>
75 #include <sys/sysent.h>
76 #include <sys/sx.h>
77 #include <sys/sysctl.h>
78 #include <sys/taskqueue.h>
79 #include <sys/unistd.h>
80 #include <sys/vnode.h>
81 #include <machine/atomic.h>
82
83 /*
84 * Limits and constants
85 */
/* Maximum length of a queue name (excluding the terminating NUL). */
#define	MQFS_NAMELEN	NAME_MAX
/* Size of one directory entry as reported through readdir. */
#define	MQFS_DELEN	(8 + MQFS_NAMELEN)

/*
 * Node types: every object in a mqfs instance is one of these.
 * "this"/"parent" are the synthetic "." and ".." directory entries.
 */
typedef enum {
	mqfstype_none = 0,
	mqfstype_root,
	mqfstype_dir,
	mqfstype_this,
	mqfstype_parent,
	mqfstype_file,
	mqfstype_symlink,
} mqfs_type_t;
99
100 struct mqfs_node;
101
102 /*
103 * mqfs_info: describes a mqfs instance
104 */
struct mqfs_info {
	struct sx		mi_lock;	/* protects the whole node tree */
	struct mqfs_node	*mi_root;	/* root directory node */
	struct unrhdr		*mi_unrhdr;	/* file number allocator */
};
110
/*
 * Per-vnode private data; links a vnode back to its mqfs node.
 * A node may have one vnode per mount (the fs can be mounted multiple
 * times), hence the list linkage.
 */
struct mqfs_vdata {
	LIST_ENTRY(mqfs_vdata)	mv_link;	/* entry in node's mn_vnodes */
	struct mqfs_node	*mv_node;	/* owning mqfs node */
	struct vnode		*mv_vnode;	/* associated vnode */
	struct task		mv_task;	/* deferred recycle task (do_recycle) */
};
117
118 /*
119 * mqfs_node: describes a node (file or directory) within a mqfs
120 */
/*
 * mqfs_node: describes a node (file or directory) within a mqfs
 */
struct mqfs_node {
	char	mn_name[MQFS_NAMELEN+1];	/* NUL-terminated node name */
	struct mqfs_info	*mn_info;	/* owning mqfs instance */
	struct mqfs_node	*mn_parent;	/* parent directory (NULL once unlinked) */
	LIST_HEAD(,mqfs_node)	mn_children;	/* children (directories only) */
	LIST_ENTRY(mqfs_node)	mn_sibling;	/* entry in parent's child list */
	LIST_HEAD(,mqfs_vdata)	mn_vnodes;	/* vnodes referencing this node */
	int			mn_refcount;	/* managed via mqnode_addref/release */
	mqfs_type_t		mn_type;	/* node type, see mqfs_type_t */
	int			mn_deleted;	/* set by do_unlink(); node is doomed */
	u_int32_t		mn_fileno;	/* inode number from mi_unrhdr */
	void			*mn_data;	/* struct mqueue * for file nodes */
	struct timespec		mn_birth;	/* creation time */
	struct timespec		mn_ctime;	/* attribute change time */
	struct timespec		mn_atime;	/* access time */
	struct timespec		mn_mtime;	/* modification time */
	uid_t			mn_uid;		/* owner */
	gid_t			mn_gid;		/* group */
	int			mn_mode;	/* permission bits */
};
141
/* vnode -> mqfs node */
#define	VTON(vp)	(((struct mqfs_vdata *)((vp)->v_data))->mv_node)
/* vnode -> message queue (file nodes only; mn_data holds the queue) */
#define	VTOMQ(vp) 	((struct mqueue *)(VTON(vp)->mn_data))
/* mount -> mqfs instance */
#define	VFSTOMQFS(m)	((struct mqfs_info *)((m)->mnt_data))
/* file descriptor -> message queue (f_data holds the mqfs node) */
#define	FPTOMQ(fp)	((struct mqueue *)(((struct mqfs_node *) \
				(fp)->f_data)->mn_data))

/* Priority-ordered list of pending messages (head of struct mqueue). */
TAILQ_HEAD(msgq, mqueue_msg);

struct mqueue;
151
/*
 * State for mq_notify(): at most one process may register to receive a
 * signal when a message arrives on an empty queue.
 */
struct mqueue_notifier {
	LIST_ENTRY(mqueue_notifier)	nt_link;	/* per-process notifier list */
	struct sigevent			nt_sigev;	/* requested notification */
	ksiginfo_t			nt_ksi;		/* preallocated siginfo */
	struct proc			*nt_proc;	/* registered process */
};
158
/*
 * Kernel representation of one POSIX message queue.
 */
struct mqueue {
	struct mtx	mq_mutex;	/* protects all fields below */
	int		mq_flags;	/* MQ_RSEL/MQ_WSEL select-pending flags */
	long		mq_maxmsg;	/* maximum number of queued messages */
	long		mq_msgsize;	/* maximum size of a single message */
	long		mq_curmsgs;	/* messages currently queued */
	long		mq_totalbytes;	/* total payload bytes queued */
	struct msgq	mq_msgq;	/* the messages, highest priority first */
	int		mq_receivers;	/* threads sleeping in receive */
	int		mq_senders;	/* threads sleeping in send */
	struct selinfo	mq_rsel;	/* select/poll for readability */
	struct selinfo	mq_wsel;	/* select/poll for writability */
	struct mqueue_notifier	*mq_notifier;	/* mq_notify registration, or NULL */
};
173
174 #define MQ_RSEL 0x01
175 #define MQ_WSEL 0x02
176
/*
 * One queued message.  The payload is stored immediately after this
 * header in the same allocation.
 */
struct mqueue_msg {
	TAILQ_ENTRY(mqueue_msg)	msg_link;	/* entry in mq_msgq */
	unsigned int	msg_prio;		/* message priority */
	unsigned int	msg_size;		/* payload size in bytes */
	/* following real data... */
};
183
SYSCTL_NODE(_kern, OID_AUTO, mqueue, CTLFLAG_RW, 0,
	"POSIX real time message queue");

/* Attributes applied when mq_open() is called without an attr argument. */
static int	default_maxmsg  = 10;
static int	default_msgsize = 1024;

static int	maxmsg = 100;
SYSCTL_INT(_kern_mqueue, OID_AUTO, maxmsg, CTLFLAG_RW,
    &maxmsg, 0, "Default maximum messages in queue");
static int	maxmsgsize = 16384;
SYSCTL_INT(_kern_mqueue, OID_AUTO, maxmsgsize, CTLFLAG_RW,
    &maxmsgsize, 0, "Default maximum message size");
static int	maxmq = 100;
SYSCTL_INT(_kern_mqueue, OID_AUTO, maxmq, CTLFLAG_RW,
    &maxmq, 0, "maximum message queues");
static int	curmq = 0;
SYSCTL_INT(_kern_mqueue, OID_AUTO, curmq, CTLFLAG_RW,
    &curmq, 0, "current message queue number");
/* Module may only be unloaded when this is set (see mqfs_uninit). */
static int	unloadable = 0;
static MALLOC_DEFINE(M_MQUEUEDATA, "mqdata", "mqueue data");

/* process_exit event handler tag, for deregistration at unload. */
static eventhandler_tag exit_tag;

/* Only one instance per-system */
static struct mqfs_info		mqfs_data;
static uma_zone_t		mqnode_zone;
static uma_zone_t		mqueue_zone;
static uma_zone_t		mvdata_zone;
static uma_zone_t		mqnoti_zone;
static struct vop_vector	mqfs_vnodeops;
static struct fileops		mqueueops;
215
216 /*
217 * Directory structure construction and manipulation
218 */
219 #ifdef notyet
220 static struct mqfs_node *mqfs_create_dir(struct mqfs_node *parent,
221 const char *name, int namelen, struct ucred *cred, int mode);
222 static struct mqfs_node *mqfs_create_link(struct mqfs_node *parent,
223 const char *name, int namelen, struct ucred *cred, int mode);
224 #endif
225
226 static struct mqfs_node *mqfs_create_file(struct mqfs_node *parent,
227 const char *name, int namelen, struct ucred *cred, int mode);
228 static int mqfs_destroy(struct mqfs_node *mn);
229 static void mqfs_fileno_alloc(struct mqfs_info *mi, struct mqfs_node *mn);
230 static void mqfs_fileno_free(struct mqfs_info *mi, struct mqfs_node *mn);
231 static int mqfs_allocv(struct mount *mp, struct vnode **vpp, struct mqfs_node *pn);
232
233 /*
234 * Message queue construction and maniplation
235 */
236 static struct mqueue *mqueue_alloc(const struct mq_attr *attr);
237 static void mqueue_free(struct mqueue *mq);
238 static int mqueue_send(struct mqueue *mq, const char *msg_ptr,
239 size_t msg_len, unsigned msg_prio, int waitok,
240 const struct timespec *abs_timeout);
241 static int mqueue_receive(struct mqueue *mq, char *msg_ptr,
242 size_t msg_len, unsigned *msg_prio, int waitok,
243 const struct timespec *abs_timeout);
244 static int _mqueue_send(struct mqueue *mq, struct mqueue_msg *msg,
245 int timo);
246 static int _mqueue_recv(struct mqueue *mq, struct mqueue_msg **msg,
247 int timo);
248 static void mqueue_send_notification(struct mqueue *mq);
249 static void mqueue_fdclose(struct thread *td, int fd, struct file *fp);
250 static void mq_proc_exit(void *arg, struct proc *p);
251
252 /*
253 * kqueue filters
254 */
255 static void filt_mqdetach(struct knote *kn);
256 static int filt_mqread(struct knote *kn, long hint);
257 static int filt_mqwrite(struct knote *kn, long hint);
258
/* kqueue filter tables for EVFILT_READ / EVFILT_WRITE on a queue fd. */
struct filterops mq_rfiltops =
	{ 1, NULL, filt_mqdetach, filt_mqread };
struct filterops mq_wfiltops =
	{ 1, NULL, filt_mqdetach, filt_mqwrite };
263
264 /*
265 * Initialize fileno bitmap
266 */
267 static void
268 mqfs_fileno_init(struct mqfs_info *mi)
269 {
270 struct unrhdr *up;
271
272 up = new_unrhdr(1, INT_MAX, NULL);
273 mi->mi_unrhdr = up;
274 }
275
276 /*
277 * Tear down fileno bitmap
278 */
279 static void
280 mqfs_fileno_uninit(struct mqfs_info *mi)
281 {
282 struct unrhdr *up;
283
284 up = mi->mi_unrhdr;
285 mi->mi_unrhdr = NULL;
286 delete_unrhdr(up);
287 }
288
289 /*
290 * Allocate a file number
291 */
/*
 * Allocate a file number
 *
 * Real nodes (root, directories, files, symlinks) get a fresh number
 * from the instance's unr allocator; the synthetic "." and ".." nodes
 * borrow the number of the directory they refer to.  Recurses upward
 * so a parent always has its number before a child copies it.
 */
static void
mqfs_fileno_alloc(struct mqfs_info *mi, struct mqfs_node *mn)
{
	/* make sure our parent has a file number */
	if (mn->mn_parent && !mn->mn_parent->mn_fileno)
		mqfs_fileno_alloc(mi, mn->mn_parent);

	switch (mn->mn_type) {
	case mqfstype_root:
	case mqfstype_dir:
	case mqfstype_file:
	case mqfstype_symlink:
		mn->mn_fileno = alloc_unr(mi->mi_unrhdr);
		break;
	case mqfstype_this:
		/* "." shares the containing directory's number */
		KASSERT(mn->mn_parent != NULL,
		    ("mqfstype_this node has no parent"));
		mn->mn_fileno = mn->mn_parent->mn_fileno;
		break;
	case mqfstype_parent:
		KASSERT(mn->mn_parent != NULL,
		    ("mqfstype_parent node has no parent"));
		if (mn->mn_parent == mi->mi_root) {
			/* ".." in the root points back at the root itself */
			mn->mn_fileno = mn->mn_parent->mn_fileno;
			break;
		}
		KASSERT(mn->mn_parent->mn_parent != NULL,
		    ("mqfstype_parent node has no grandparent"));
		mn->mn_fileno = mn->mn_parent->mn_parent->mn_fileno;
		break;
	default:
		KASSERT(0,
		    ("mqfs_fileno_alloc() called for unknown type node: %d",
			mn->mn_type));
		break;
	}
}
329
330 /*
331 * Release a file number
332 */
/*
 * Release a file number
 *
 * Only node types that own their number (see mqfs_fileno_alloc) return
 * it to the allocator.
 */
static void
mqfs_fileno_free(struct mqfs_info *mi, struct mqfs_node *mn)
{
	switch (mn->mn_type) {
	case mqfstype_root:
	case mqfstype_dir:
	case mqfstype_file:
	case mqfstype_symlink:
		free_unr(mi->mi_unrhdr, mn->mn_fileno);
		break;
	case mqfstype_this:
	case mqfstype_parent:
		/* ignore these, as they don't "own" their file number */
		break;
	default:
		KASSERT(0,
		    ("mqfs_fileno_free() called for unknown type node: %d",
			mn->mn_type));
		break;
	}
}
354
355 static __inline struct mqfs_node *
356 mqnode_alloc(void)
357 {
358 return uma_zalloc(mqnode_zone, M_WAITOK | M_ZERO);
359 }
360
361 static __inline void
362 mqnode_free(struct mqfs_node *node)
363 {
364 uma_zfree(mqnode_zone, node);
365 }
366
367 static __inline void
368 mqnode_addref(struct mqfs_node *node)
369 {
370 atomic_fetchadd_int(&node->mn_refcount, 1);
371 }
372
/*
 * Drop a reference on a node and destroy it when the last meaningful
 * reference is gone.  Directories are destroyed when the count drops
 * from 3 (their "." and ".." children each hold a back-reference via
 * mqfs_add_node), plain nodes when it drops from 1.
 */
static __inline void
mqnode_release(struct mqfs_node *node)
{
	int old, exp;

	old = atomic_fetchadd_int(&node->mn_refcount, -1);
	if (node->mn_type == mqfstype_dir ||
	    node->mn_type == mqfstype_root)
		exp = 3; /* include . and .. */
	else
		exp = 1;
	if (old == exp)
		mqfs_destroy(node);
}
387
388 /*
389 * Add a node to a directory
390 */
/*
 * Add a node to a directory
 *
 * Links 'node' into 'parent' and takes a reference on the parent on the
 * child's behalf (dropped again in do_unlink/mqfs_destroy).  Always
 * succeeds; the int return exists for the callers' error-checking
 * convention.
 */
static int
mqfs_add_node(struct mqfs_node *parent, struct mqfs_node *node)
{
	KASSERT(parent != NULL, ("%s(): parent is NULL", __func__));
	KASSERT(parent->mn_info != NULL,
	    ("%s(): parent has no mn_info", __func__));
	KASSERT(parent->mn_type == mqfstype_dir ||
	    parent->mn_type == mqfstype_root,
	    ("%s(): parent is not a directory", __func__));

	node->mn_info = parent->mn_info;
	node->mn_parent = parent;
	LIST_INIT(&node->mn_children);
	LIST_INIT(&node->mn_vnodes);
	LIST_INSERT_HEAD(&parent->mn_children, node, mn_sibling);
	mqnode_addref(parent);
	return (0);
}
409
/*
 * Allocate and initialize a node of the given type, owned by 'cred'
 * with permission bits 'mode'.  All timestamps start at the current
 * time.  The node is not yet linked into any directory.
 *
 * NOTE: strncpy() does not NUL-terminate when namelen fills the buffer;
 * this is safe only because mqnode_alloc() zeroes the node and callers
 * are expected to pass namelen <= MQFS_NAMELEN (mqfs_lookupx rejects
 * longer names) -- confirm for any new caller.
 */
static struct mqfs_node *
mqfs_create_node(const char *name, int namelen, struct ucred *cred, int mode,
	int nodetype)
{
	struct mqfs_node *node;

	node = mqnode_alloc();
	strncpy(node->mn_name, name, namelen);
	node->mn_type = nodetype;
	node->mn_refcount = 1;
	getnanotime(&node->mn_birth);
	node->mn_ctime = node->mn_atime = node->mn_mtime
		= node->mn_birth;
	node->mn_uid = cred->cr_uid;
	node->mn_gid = cred->cr_gid;
	node->mn_mode = mode;
	return (node);
}
428
429 /*
430 * Create a file
431 */
432 static struct mqfs_node *
433 mqfs_create_file(struct mqfs_node *parent, const char *name, int namelen,
434 struct ucred *cred, int mode)
435 {
436 struct mqfs_node *node;
437
438 node = mqfs_create_node(name, namelen, cred, mode, mqfstype_file);
439 if (mqfs_add_node(parent, node) != 0) {
440 mqnode_free(node);
441 return (NULL);
442 }
443 return (node);
444 }
445
446 /*
447 * Add . and .. to a directory
448 */
449 static int
450 mqfs_fixup_dir(struct mqfs_node *parent)
451 {
452 struct mqfs_node *dir;
453
454 dir = mqnode_alloc();
455 dir->mn_name[0] = '.';
456 dir->mn_type = mqfstype_this;
457 dir->mn_refcount = 1;
458 if (mqfs_add_node(parent, dir) != 0) {
459 mqnode_free(dir);
460 return (-1);
461 }
462
463 dir = mqnode_alloc();
464 dir->mn_name[0] = dir->mn_name[1] = '.';
465 dir->mn_type = mqfstype_parent;
466 dir->mn_refcount = 1;
467
468 if (mqfs_add_node(parent, dir) != 0) {
469 mqnode_free(dir);
470 return (-1);
471 }
472
473 return (0);
474 }
475
476 #ifdef notyet
477
478 /*
479 * Create a directory
480 */
/*
 * Create a directory
 *
 * (Compiled only under 'notyet'.)  Creates a directory node, links it
 * into 'parent' and adds the "." and ".." entries.  Returns NULL on
 * failure with the partially-built node cleaned up.
 */
static struct mqfs_node *
mqfs_create_dir(struct mqfs_node *parent, const char *name, int namelen,
	struct ucred *cred, int mode)
{
	struct mqfs_node *node;

	node = mqfs_create_node(name, namelen, cred, mode, mqfstype_dir);
	if (mqfs_add_node(parent, node) != 0) {
		mqnode_free(node);
		return (NULL);
	}

	if (mqfs_fixup_dir(node) != 0) {
		/* tears down the node and any "." / ".." already added */
		mqfs_destroy(node);
		return (NULL);
	}
	return (node);
}
499
500 /*
501 * Create a symlink
502 */
/*
 * Create a symlink
 *
 * (Compiled only under 'notyet'.)  Same pattern as mqfs_create_file but
 * with a symlink-typed node.
 */
static struct mqfs_node *
mqfs_create_link(struct mqfs_node *parent, const char *name, int namelen,
	struct ucred *cred, int mode)
{
	struct mqfs_node *node;

	node = mqfs_create_node(name, namelen, cred, mode, mqfstype_symlink);
	if (mqfs_add_node(parent, node) != 0) {
		mqnode_free(node);
		return (NULL);
	}
	return (node);
}
516
517 #endif
518
519 /*
520 * Destroy a node or a tree of nodes
521 */
/*
 * Destroy a node or a tree of nodes
 *
 * Recursively destroys children of directories, unlinks the node from
 * its parent, returns its file number and frees any attached message
 * queue before freeing the node itself.  Always returns 0.
 */
static int
mqfs_destroy(struct mqfs_node *node)
{
	struct mqfs_node *parent;

	KASSERT(node != NULL,
	    ("%s(): node is NULL", __func__));
	KASSERT(node->mn_info != NULL,
	    ("%s(): node has no mn_info", __func__));

	/* destroy children */
	if (node->mn_type == mqfstype_dir || node->mn_type == mqfstype_root)
		while (! LIST_EMPTY(&node->mn_children))
			mqfs_destroy(LIST_FIRST(&node->mn_children));

	/* unlink from parent */
	if ((parent = node->mn_parent) != NULL) {
		KASSERT(parent->mn_info == node->mn_info,
		    ("%s(): parent has different mn_info", __func__));
		LIST_REMOVE(node, mn_sibling);
	}

	if (node->mn_fileno != 0)
		mqfs_fileno_free(node->mn_info, node);
	if (node->mn_data != NULL)
		mqueue_free(node->mn_data);
	mqnode_free(node);
	return (0);
}
551
552 /*
553 * Mount a mqfs instance
554 */
/*
 * Mount a mqfs instance
 *
 * Every mount shares the single system-wide mqfs_data instance, so all
 * mount points show the same contents.  Remounting (MNT_UPDATE) is not
 * supported.  The statfs values are fixed placeholders.
 */
static int
mqfs_mount(struct mount *mp, struct thread *td)
{
	struct statfs *sbp;

	if (mp->mnt_flag & MNT_UPDATE)
		return (EOPNOTSUPP);

	mp->mnt_data = &mqfs_data;
	MNT_ILOCK(mp);
	mp->mnt_flag |= MNT_LOCAL;
	mp->mnt_kern_flag |= MNTK_MPSAFE;
	MNT_IUNLOCK(mp);
	vfs_getnewfsid(mp);

	sbp = &mp->mnt_stat;
	vfs_mountedfrom(mp, "mqueue");
	sbp->f_bsize = PAGE_SIZE;
	sbp->f_iosize = PAGE_SIZE;
	sbp->f_blocks = 1;
	sbp->f_bfree = 0;
	sbp->f_bavail = 0;
	sbp->f_files = 1;
	sbp->f_ffree = 0;
	return (0);
}
581
582 /*
583 * Unmount a mqfs instance
584 */
585 static int
586 mqfs_unmount(struct mount *mp, int mntflags, struct thread *td)
587 {
588 int error;
589
590 error = vflush(mp, 0, (mntflags & MNT_FORCE) ? FORCECLOSE : 0, td);
591 return (error);
592 }
593
594 /*
595 * Return a root vnode
596 */
597 static int
598 mqfs_root(struct mount *mp, int flags, struct vnode **vpp, struct thread *td)
599 {
600 struct mqfs_info *mqfs;
601 int ret;
602
603 mqfs = VFSTOMQFS(mp);
604 sx_xlock(&mqfs->mi_lock);
605 ret = mqfs_allocv(mp, vpp, mqfs->mi_root);
606 sx_xunlock(&mqfs->mi_lock);
607 return (ret);
608 }
609
610 /*
611 * Return filesystem stats
612 */
/*
 * Return filesystem stats
 *
 * The placeholder values written at mount time are returned unchanged.
 */
static int
mqfs_statfs(struct mount *mp, struct statfs *sbp, struct thread *td)
{
	/* XXX update statistics */
	return (0);
}
619
620 /*
621 * Initialize a mqfs instance
622 */
/*
 * Initialize a mqfs instance
 *
 * Called once at VFS registration: creates the UMA zones, builds the
 * system-wide root directory (with "." and ".."), registers the
 * process-exit event handler and hooks mqueue_fdclose() into the
 * descriptor-close path, then advertises _POSIX_MESSAGE_PASSING.
 */
static int
mqfs_init(struct vfsconf *vfc)
{
	struct mqfs_node *root;
	struct mqfs_info *mi;

	mqnode_zone = uma_zcreate("mqnode", sizeof(struct mqfs_node),
		NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	mqueue_zone = uma_zcreate("mqueue", sizeof(struct mqueue),
		NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	mvdata_zone = uma_zcreate("mvdata",
		sizeof(struct mqfs_vdata), NULL, NULL, NULL,
		NULL, UMA_ALIGN_PTR, 0);
	mqnoti_zone = uma_zcreate("mqnotifier", sizeof(struct mqueue_notifier),
		NULL, NULL, NULL, NULL, UMA_ALIGN_PTR, 0);
	mi = &mqfs_data;
	sx_init(&mi->mi_lock, "mqfs lock");
	/* set up the root directory */
	root = mqfs_create_node("/", 1, curthread->td_ucred, 01777,
		mqfstype_root);
	root->mn_info = mi;
	LIST_INIT(&root->mn_children);
	LIST_INIT(&root->mn_vnodes);
	mi->mi_root = root;
	mqfs_fileno_init(mi);
	mqfs_fileno_alloc(mi, root);
	mqfs_fixup_dir(root);
	exit_tag = EVENTHANDLER_REGISTER(process_exit, mq_proc_exit, NULL,
	    EVENTHANDLER_PRI_ANY);
	mq_fdclose = mqueue_fdclose;
	p31b_setcfg(CTL_P1003_1B_MESSAGE_PASSING, _POSIX_MESSAGE_PASSING);
	return (0);
}
656
657 /*
658 * Destroy a mqfs instance
659 */
/*
 * Destroy a mqfs instance
 *
 * Refuses to run unless the 'unloadable' tunable is set; otherwise
 * tears down everything mqfs_init() created, in reverse order.
 */
static int
mqfs_uninit(struct vfsconf *vfc)
{
	struct mqfs_info *mi;

	if (!unloadable)
		return (EOPNOTSUPP);
	EVENTHANDLER_DEREGISTER(process_exit, exit_tag);
	mi = &mqfs_data;
	mqfs_destroy(mi->mi_root);
	mi->mi_root = NULL;
	mqfs_fileno_uninit(mi);
	sx_destroy(&mi->mi_lock);
	uma_zdestroy(mqnode_zone);
	uma_zdestroy(mqueue_zone);
	uma_zdestroy(mvdata_zone);
	uma_zdestroy(mqnoti_zone);
	return (0);
}
679
680 /*
681 * task routine
682 */
683 static void
684 do_recycle(void *context, int pending __unused)
685 {
686 struct vnode *vp = (struct vnode *)context;
687
688 vrecycle(vp, curthread);
689 vdrop(vp);
690 }
691
692 /*
693 * Allocate a vnode
694 */
/*
 * Allocate a vnode
 *
 * Returns (locked, referenced in *vpp) the vnode for node 'pn' on mount
 * 'mp', reusing an existing one for this mount when possible.  The
 * caller must hold the instance lock.  The vnode type is derived from
 * the node type; a fresh vnode takes a node reference and is linked
 * into the node's mn_vnodes list.
 */
static int
mqfs_allocv(struct mount *mp, struct vnode **vpp, struct mqfs_node *pn)
{
	struct mqfs_vdata *vd;
	int error;

	/* one vnode per (node, mount) pair; look for an existing one */
	LIST_FOREACH(vd, &pn->mn_vnodes, mv_link) {
		if (vd->mv_vnode->v_mount == mp)
			break;
	}

	if (vd != NULL) {
		if (vget(vd->mv_vnode, 0, curthread) == 0) {
			*vpp = vd->mv_vnode;
			vn_lock(*vpp, LK_RETRY | LK_EXCLUSIVE,
			    curthread);
			return (0);
		}
		/* XXX if this can happen, we're in trouble */
		/* NOTE(review): on vget() failure we fall through and create
		 * a second vnode for the same (node, mount) pair -- confirm
		 * this is the intended recovery. */
	}

	error = getnewvnode("mqueue", mp, &mqfs_vnodeops, vpp);
	if (error)
		return (error);
	vn_lock(*vpp, LK_EXCLUSIVE | LK_RETRY, curthread);
	error = insmntque(*vpp, mp);
	if (error != 0) {
		/* insmntque() disposed of the vnode on failure */
		*vpp = NULLVP;
		return (error);
	}
	vd = uma_zalloc(mvdata_zone, M_WAITOK);
	(*vpp)->v_data = vd;
	vd->mv_vnode = *vpp;
	vd->mv_node = pn;
	TASK_INIT(&vd->mv_task, 0, do_recycle, *vpp);
	LIST_INSERT_HEAD(&pn->mn_vnodes, vd, mv_link);
	mqnode_addref(pn);
	switch (pn->mn_type) {
	case mqfstype_root:
		(*vpp)->v_vflag = VV_ROOT;
		/* fall through */
	case mqfstype_dir:
	case mqfstype_this:
	case mqfstype_parent:
		(*vpp)->v_type = VDIR;
		break;
	case mqfstype_file:
		(*vpp)->v_type = VREG;
		break;
	case mqfstype_symlink:
		(*vpp)->v_type = VLNK;
		break;
	case mqfstype_none:
		KASSERT(0, ("mqfs_allocf called for null node\n"));
	default:
		panic("%s has unexpected type: %d", pn->mn_name, pn->mn_type);
	}
	return (0);
}
754
755 /*
756 * Search a directory entry
757 */
758 static struct mqfs_node *
759 mqfs_search(struct mqfs_node *pd, const char *name, int len)
760 {
761 struct mqfs_node *pn;
762
763 LIST_FOREACH(pn, &pd->mn_children, mn_sibling) {
764 if (strncmp(pn->mn_name, name, len) == 0)
765 return (pn);
766 }
767 return (NULL);
768 }
769
770 /*
771 * Look up a file or directory.
772 */
/*
 * Look up a file or directory.
 *
 * Core of VOP_LOOKUP, called with the instance lock held (see
 * mqfs_lookup).  Handles ".", "..", existing names and the
 * CREATE/RENAME "not found but may create" case per the namei(9)
 * protocol.
 */
static int
mqfs_lookupx(struct vop_cachedlookup_args *ap)
{
	struct componentname *cnp;
	struct vnode *dvp, **vpp;
	struct mqfs_node *pd;
	struct mqfs_node *pn;
	int nameiop, flags, error, namelen;
	char *pname;
	struct thread *td;

	cnp = ap->a_cnp;
	vpp = ap->a_vpp;
	dvp = ap->a_dvp;
	pname = cnp->cn_nameptr;
	namelen = cnp->cn_namelen;
	td = cnp->cn_thread;
	flags = cnp->cn_flags;
	nameiop = cnp->cn_nameiop;
	pd = VTON(dvp);
	pn = NULL;
	*vpp = NULLVP;

	if (dvp->v_type != VDIR)
		return (ENOTDIR);

	/* caller needs search permission on the directory */
	error = VOP_ACCESS(dvp, VEXEC, cnp->cn_cred, cnp->cn_thread);
	if (error)
		return (error);

	/* shortcut: check if the name is too long */
	if (cnp->cn_namelen >= MQFS_NAMELEN)
		return (ENOENT);

	/* self */
	if (namelen == 1 && pname[0] == '.') {
		/* "." may not be the target of create/rename/delete */
		if ((flags & ISLASTCN) && nameiop != LOOKUP)
			return (EINVAL);
		pn = pd;
		*vpp = dvp;
		VREF(dvp);
		return (0);
	}

	/* parent */
	if (cnp->cn_flags & ISDOTDOT) {
		if (dvp->v_vflag & VV_ROOT)
			return (EIO);
		if ((flags & ISLASTCN) && nameiop != LOOKUP)
			return (EINVAL);
		/*
		 * Unlock the child before locking the parent vnode to
		 * respect the vnode lock order, then relock the child.
		 */
		VOP_UNLOCK(dvp, 0, cnp->cn_thread);
		KASSERT(pd->mn_parent, ("non-root directory has no parent"));
		pn = pd->mn_parent;
		error = mqfs_allocv(dvp->v_mount, vpp, pn);
		vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY, td);
		return (error);
	}

	/* named node */
	pn = mqfs_search(pd, pname, namelen);

	/* found */
	if (pn != NULL) {
		/* DELETE */
		if (nameiop == DELETE && (flags & ISLASTCN)) {
			error = VOP_ACCESS(dvp, VWRITE, cnp->cn_cred, td);
			if (error)
				return (error);
			/*
			 * NOTE(review): *vpp is NULLVP here, so this
			 * branch appears unreachable; it mirrors the
			 * "deleting ." shortcut from similar file
			 * systems -- confirm intent before changing.
			 */
			if (*vpp == dvp) {
				VREF(dvp);
				*vpp = dvp;
				return (0);
			}
		}

		/* allocate vnode */
		error = mqfs_allocv(dvp->v_mount, vpp, pn);
		if (error == 0 && cnp->cn_flags & MAKEENTRY)
			cache_enter(dvp, *vpp, cnp);
		return (error);
	}

	/* not found */

	/* will create a new entry in the directory ? */
	if ((nameiop == CREATE || nameiop == RENAME) && (flags & LOCKPARENT)
	    && (flags & ISLASTCN)) {
		error = VOP_ACCESS(dvp, VWRITE, cnp->cn_cred, td);
		if (error)
			return (error);
		/* keep the name buffer for the upcoming VOP_CREATE */
		cnp->cn_flags |= SAVENAME;
		return (EJUSTRETURN);
	}
	return (ENOENT);
}
868
869 #if 0
870 struct vop_lookup_args {
871 struct vop_generic_args a_gen;
872 struct vnode *a_dvp;
873 struct vnode **a_vpp;
874 struct componentname *a_cnp;
875 };
876 #endif
877
878 /*
879 * vnode lookup operation
880 */
881 static int
882 mqfs_lookup(struct vop_cachedlookup_args *ap)
883 {
884 struct mqfs_info *mqfs = VFSTOMQFS(ap->a_dvp->v_mount);
885 int rc;
886
887 sx_xlock(&mqfs->mi_lock);
888 rc = mqfs_lookupx(ap);
889 sx_xunlock(&mqfs->mi_lock);
890 return (rc);
891 }
892
893 #if 0
894 struct vop_create_args {
895 struct vnode *a_dvp;
896 struct vnode **a_vpp;
897 struct componentname *a_cnp;
898 struct vattr *a_vap;
899 };
900 #endif
901
902 /*
903 * vnode creation operation
904 */
/*
 * vnode creation operation
 *
 * Creates a new file node (with an attached, default-attribute message
 * queue) in the directory dvp.  The queue is allocated before taking
 * the instance lock since mqueue_alloc() may sleep; it is freed again
 * on any failure.  Lookup has already verified the name does not exist
 * (hence the disabled duplicate check).
 */
static int
mqfs_create(struct vop_create_args *ap)
{
	struct mqfs_info *mqfs = VFSTOMQFS(ap->a_dvp->v_mount);
	struct componentname *cnp = ap->a_cnp;
	struct mqfs_node *pd;
	struct mqfs_node *pn;
	struct mqueue *mq;
	int error;

	pd = VTON(ap->a_dvp);
	if (pd->mn_type != mqfstype_root && pd->mn_type != mqfstype_dir)
		return (ENOTDIR);
	mq = mqueue_alloc(NULL);
	if (mq == NULL)
		return (EAGAIN);
	sx_xlock(&mqfs->mi_lock);
#if 0
	/* named node */
	pn = mqfs_search(pd, cnp->cn_nameptr, cnp->cn_namelen);
	if (pn != NULL) {
		mqueue_free(mq);
		sx_xunlock(&mqfs->mi_lock);
		return (EEXIST);
	}
#else
	if ((cnp->cn_flags & HASBUF) == 0)
		panic("%s: no name", __func__);
#endif
	pn = mqfs_create_file(pd, cnp->cn_nameptr, cnp->cn_namelen,
		cnp->cn_cred, ap->a_vap->va_mode);
	if (pn == NULL)
		error = ENOSPC;
	else {
		error = mqfs_allocv(ap->a_dvp->v_mount, ap->a_vpp, pn);
		if (error)
			mqfs_destroy(pn);
		else
			pn->mn_data = mq;
	}
	sx_xunlock(&mqfs->mi_lock);
	if (error)
		mqueue_free(mq);
	return (error);
}
950
951 /*
952 * Remove an entry
953 */
/*
 * Remove an entry
 *
 * Unlinks 'pn' from its parent and marks it deleted; any vnodes still
 * referencing it are purged from the name cache and queued for
 * recycling (a hold reference keeps each vnode alive until do_recycle
 * runs).  Requires the instance lock.  Only the owner, or a caller
 * with PRIV_MQ_ADMIN, may remove a queue.
 */
static
int do_unlink(struct mqfs_node *pn, struct ucred *ucred)
{
	struct mqfs_node *parent;
	struct mqfs_vdata *vd;
	int error = 0;

	sx_assert(&pn->mn_info->mi_lock, SX_LOCKED);

	/*
	 * NOTE(review): a failing priv_check_cred() result is replaced
	 * by EACCES, so its specific error code is never returned.
	 */
	if (ucred->cr_uid != pn->mn_uid &&
	    (error = priv_check_cred(ucred, PRIV_MQ_ADMIN, 0)) != 0)
		error = EACCES;
	else if (!pn->mn_deleted) {
		parent = pn->mn_parent;
		pn->mn_parent = NULL;
		pn->mn_deleted = 1;
		LIST_REMOVE(pn, mn_sibling);
		LIST_FOREACH(vd, &pn->mn_vnodes, mv_link) {
			cache_purge(vd->mv_vnode);
			vhold(vd->mv_vnode);
			taskqueue_enqueue(taskqueue_thread, &vd->mv_task);
		}
		/* drop the directory-entry ref and the child's ref on parent */
		mqnode_release(pn);
		mqnode_release(parent);
	} else
		error = ENOENT;
	return (error);
}
982
983 #if 0
984 struct vop_remove_args {
985 struct vnode *a_dvp;
986 struct vnode *a_vp;
987 struct componentname *a_cnp;
988 };
989 #endif
990
991 /*
992 * vnode removal operation
993 */
994 static int
995 mqfs_remove(struct vop_remove_args *ap)
996 {
997 struct mqfs_info *mqfs = VFSTOMQFS(ap->a_dvp->v_mount);
998 struct mqfs_node *pn;
999 int error;
1000
1001 if (ap->a_vp->v_type == VDIR)
1002 return (EPERM);
1003 pn = VTON(ap->a_vp);
1004 sx_xlock(&mqfs->mi_lock);
1005 error = do_unlink(pn, ap->a_cnp->cn_cred);
1006 sx_xunlock(&mqfs->mi_lock);
1007 return (error);
1008 }
1009
1010 #if 0
1011 struct vop_inactive_args {
1012 struct vnode *a_vp;
1013 struct thread *a_td;
1014 };
1015 #endif
1016
1017 static int
1018 mqfs_inactive(struct vop_inactive_args *ap)
1019 {
1020 struct mqfs_node *pn = VTON(ap->a_vp);
1021
1022 if (pn->mn_deleted)
1023 vrecycle(ap->a_vp, ap->a_td);
1024 return (0);
1025 }
1026
1027 #if 0
1028 struct vop_reclaim_args {
1029 struct vop_generic_args a_gen;
1030 struct vnode *a_vp;
1031 struct thread *a_td;
1032 };
1033 #endif
1034
/*
 * VOP_RECLAIM: detach the vnode from its mqfs node.  Frees the per-
 * vnode data, unlinks it from the node's vnode list and releases the
 * node reference taken in mqfs_allocv() (which may destroy the node).
 */
static int
mqfs_reclaim(struct vop_reclaim_args *ap)
{
	struct mqfs_info *mqfs = VFSTOMQFS(ap->a_vp->v_mount);
	struct vnode *vp = ap->a_vp;
	struct mqfs_node *pn;
	struct mqfs_vdata *vd;

	vd = vp->v_data;
	pn = vd->mv_node;
	sx_xlock(&mqfs->mi_lock);
	vp->v_data = NULL;
	LIST_REMOVE(vd, mv_link);
	uma_zfree(mvdata_zone, vd);
	mqnode_release(pn);
	sx_xunlock(&mqfs->mi_lock);
	return (0);
}
1053
1054 #if 0
1055 struct vop_open_args {
1056 struct vop_generic_args a_gen;
1057 struct vnode *a_vp;
1058 int a_mode;
1059 struct ucred *a_cred;
1060 struct thread *a_td;
1061 struct file *a_fp;
1062 };
1063 #endif
1064
/*
 * VOP_OPEN: nothing to do for mqfs; all state lives in the node.
 */
static int
mqfs_open(struct vop_open_args *ap)
{
	return (0);
}
1070
1071 #if 0
1072 struct vop_close_args {
1073 struct vop_generic_args a_gen;
1074 struct vnode *a_vp;
1075 int a_fflag;
1076 struct ucred *a_cred;
1077 struct thread *a_td;
1078 };
1079 #endif
1080
/*
 * VOP_CLOSE: nothing to do for mqfs.
 */
static int
mqfs_close(struct vop_close_args *ap)
{
	return (0);
}
1086
1087 #if 0
1088 struct vop_access_args {
1089 struct vop_generic_args a_gen;
1090 struct vnode *a_vp;
1091 int a_mode;
1092 struct ucred *a_cred;
1093 struct thread *a_td;
1094 };
1095 #endif
1096
1097 /*
1098 * Verify permissions
1099 */
1100 static int
1101 mqfs_access(struct vop_access_args *ap)
1102 {
1103 struct vnode *vp = ap->a_vp;
1104 struct vattr vattr;
1105 int error;
1106
1107 error = VOP_GETATTR(vp, &vattr, ap->a_cred, ap->a_td);
1108 if (error)
1109 return (error);
1110 error = vaccess(vp->v_type, vattr.va_mode, vattr.va_uid,
1111 vattr.va_gid, ap->a_mode, ap->a_cred, NULL);
1112 return (error);
1113 }
1114
1115 #if 0
1116 struct vop_getattr_args {
1117 struct vop_generic_args a_gen;
1118 struct vnode *a_vp;
1119 struct vattr *a_vap;
1120 struct ucred *a_cred;
1121 struct thread *a_td;
1122 };
1123 #endif
1124
1125 /*
1126 * Get file attributes
1127 */
1128 static int
1129 mqfs_getattr(struct vop_getattr_args *ap)
1130 {
1131 struct vnode *vp = ap->a_vp;
1132 struct mqfs_node *pn = VTON(vp);
1133 struct vattr *vap = ap->a_vap;
1134 int error = 0;
1135
1136 vap->va_type = vp->v_type;
1137 vap->va_mode = pn->mn_mode;
1138 vap->va_nlink = 1;
1139 vap->va_uid = pn->mn_uid;
1140 vap->va_gid = pn->mn_gid;
1141 vap->va_fsid = vp->v_mount->mnt_stat.f_fsid.val[0];
1142 vap->va_fileid = pn->mn_fileno;
1143 vap->va_size = 0;
1144 vap->va_blocksize = PAGE_SIZE;
1145 vap->va_bytes = vap->va_size = 0;
1146 vap->va_atime = pn->mn_atime;
1147 vap->va_mtime = pn->mn_mtime;
1148 vap->va_ctime = pn->mn_ctime;
1149 vap->va_birthtime = pn->mn_birth;
1150 vap->va_gen = 0;
1151 vap->va_flags = 0;
1152 vap->va_rdev = NODEV;
1153 vap->va_bytes = 0;
1154 vap->va_filerev = 0;
1155 return (error);
1156 }
1157
1158 #if 0
1159 struct vop_setattr_args {
1160 struct vop_generic_args a_gen;
1161 struct vnode *a_vp;
1162 struct vattr *a_vap;
1163 struct ucred *a_cred;
1164 struct thread *a_td;
1165 };
1166 #endif
1167 /*
1168 * Set attributes
1169 */
/*
 * Set attributes (VOP_SETATTR)
 *
 * Only ownership, mode and access/modification times may be changed;
 * any attempt to set an immutable attribute returns EINVAL.  Ownership
 * changes require VADMIN (plus PRIV_MQ_ADMIN in the cases noted below);
 * mode changes by a non-owner require PRIV_MQ_ADMIN.  Any successful
 * change also updates the node's ctime.
 */
static int
mqfs_setattr(struct vop_setattr_args *ap)
{
	struct mqfs_node *pn;
	struct vattr *vap;
	struct vnode *vp;
	int c, error;
	uid_t uid;
	gid_t gid;

	vap = ap->a_vap;
	vp = ap->a_vp;
	/* reject attributes that can never be changed on mqfs */
	if ((vap->va_type != VNON) ||
	    (vap->va_nlink != VNOVAL) ||
	    (vap->va_fsid != VNOVAL) ||
	    (vap->va_fileid != VNOVAL) ||
	    (vap->va_blocksize != VNOVAL) ||
	    (vap->va_flags != VNOVAL && vap->va_flags != 0) ||
	    (vap->va_rdev != VNOVAL) ||
	    ((int)vap->va_bytes != VNOVAL) ||
	    (vap->va_gen != VNOVAL)) {
		return (EINVAL);
	}

	pn = VTON(vp);

	error = c = 0;
	/* VNOVAL in a field means "leave unchanged" */
	if (vap->va_uid == (uid_t)VNOVAL)
		uid = pn->mn_uid;
	else
		uid = vap->va_uid;
	if (vap->va_gid == (gid_t)VNOVAL)
		gid = pn->mn_gid;
	else
		gid = vap->va_gid;

	if (uid != pn->mn_uid || gid != pn->mn_gid) {
		/*
		 * To modify the ownership of a file, must possess VADMIN
		 * for that file.
		 */
		if ((error = VOP_ACCESS(vp, VADMIN, ap->a_cred, ap->a_td)))
			return (error);

		/*
		 * XXXRW: Why is there a privilege check here: shouldn't the
		 * check in VOP_ACCESS() be enough?  Also, are the group bits
		 * below definitely right?
		 */
		if (((ap->a_cred->cr_uid != pn->mn_uid) || uid != pn->mn_uid ||
		    (gid != pn->mn_gid && !groupmember(gid, ap->a_cred))) &&
		    (error = priv_check(ap->a_td, PRIV_MQ_ADMIN)) != 0)
			return (error);
		pn->mn_uid = uid;
		pn->mn_gid = gid;
		c = 1;
	}

	if (vap->va_mode != (mode_t)VNOVAL) {
		if ((ap->a_cred->cr_uid != pn->mn_uid) &&
		    (error = priv_check(ap->a_td, PRIV_MQ_ADMIN)))
			return (error);
		pn->mn_mode = vap->va_mode;
		c = 1;
	}

	if (vap->va_atime.tv_sec != VNOVAL || vap->va_mtime.tv_sec != VNOVAL) {
		/* See the comment in ufs_vnops::ufs_setattr(). */
		if ((error = VOP_ACCESS(vp, VADMIN, ap->a_cred, ap->a_td)) &&
		    ((vap->va_vaflags & VA_UTIMES_NULL) == 0 ||
		    (error = VOP_ACCESS(vp, VWRITE, ap->a_cred, ap->a_td))))
			return (error);
		if (vap->va_atime.tv_sec != VNOVAL) {
			pn->mn_atime = vap->va_atime;
		}
		if (vap->va_mtime.tv_sec != VNOVAL) {
			pn->mn_mtime = vap->va_mtime;
		}
		c = 1;
	}
	if (c) {
		/* something changed; bump the change time */
		vfs_timestamp(&pn->mn_ctime);
	}
	return (0);
}
1255
1256 #if 0
1257 struct vop_read_args {
1258 struct vop_generic_args a_gen;
1259 struct vnode *a_vp;
1260 struct uio *a_uio;
1261 int a_ioflag;
1262 struct ucred *a_cred;
1263 };
1264 #endif
1265
1266 /*
1267 * Read from a file
1268 */
1269 static int
1270 mqfs_read(struct vop_read_args *ap)
1271 {
1272 char buf[80];
1273 struct vnode *vp = ap->a_vp;
1274 struct uio *uio = ap->a_uio;
1275 struct mqfs_node *pn;
1276 struct mqueue *mq;
1277 int len, error;
1278
1279 if (vp->v_type != VREG)
1280 return (EINVAL);
1281
1282 pn = VTON(vp);
1283 mq = VTOMQ(vp);
1284 snprintf(buf, sizeof(buf),
1285 "QSIZE:%-10ld MAXMSG:%-10ld CURMSG:%-10ld MSGSIZE:%-10ld\n",
1286 mq->mq_totalbytes,
1287 mq->mq_maxmsg,
1288 mq->mq_curmsgs,
1289 mq->mq_msgsize);
1290 buf[sizeof(buf)-1] = '\0';
1291 len = strlen(buf);
1292 error = uiomove_frombuf(buf, len, uio);
1293 return (error);
1294 }
1295
#if 0
struct vop_readdir_args {
	struct vop_generic_args a_gen;
	struct vnode *a_vp;
	struct uio *a_uio;
	struct ucred *a_cred;
	int *a_eofflag;
	int *a_ncookies;
	u_long **a_cookies;
};
#endif

/*
 * Return directory entries (VOP_READDIR).
 *
 * Walks the directory node's child list under the filesystem sx lock,
 * emitting one fixed-size dirent per child starting at uio_offset.
 * Cookie generation is not supported, so a caller-supplied ncookies
 * pointer is stashed and zeroed for the duration of the call.
 */
static int
mqfs_readdir(struct vop_readdir_args *ap)
{
	struct vnode *vp;
	struct mqfs_info *mi;
	struct mqfs_node *pd;
	struct mqfs_node *pn;
	struct dirent entry;
	struct uio *uio;
	int *tmp_ncookies = NULL;
	off_t offset;
	int error, i;

	vp = ap->a_vp;
	mi = VFSTOMQFS(vp->v_mount);
	pd = VTON(vp);
	uio = ap->a_uio;

	if (vp->v_type != VDIR)
		return (ENOTDIR);

	if (uio->uio_offset < 0)
		return (EINVAL);

	/* Hide the cookie request: report zero cookies to the caller. */
	if (ap->a_ncookies != NULL) {
		tmp_ncookies = ap->a_ncookies;
		*ap->a_ncookies = 0;
		ap->a_ncookies = NULL;
	}

	error = 0;
	offset = 0;

	sx_xlock(&mi->mi_lock);

	LIST_FOREACH(pn, &pd->mn_children, mn_sibling) {
		entry.d_reclen = sizeof(entry);
		/* Lazily assign a file number on first listing. */
		if (!pn->mn_fileno)
			mqfs_fileno_alloc(mi, pn);
		entry.d_fileno = pn->mn_fileno;
		for (i = 0; i < MQFS_NAMELEN - 1 && pn->mn_name[i] != '\0'; ++i)
			entry.d_name[i] = pn->mn_name[i];
		entry.d_name[i] = 0;
		entry.d_namlen = i;
		switch (pn->mn_type) {
		case mqfstype_root:
		case mqfstype_dir:
		case mqfstype_this:
		case mqfstype_parent:
			entry.d_type = DT_DIR;
			break;
		case mqfstype_file:
			entry.d_type = DT_REG;
			break;
		case mqfstype_symlink:
			entry.d_type = DT_LNK;
			break;
		default:
			panic("%s has unexpected node type: %d", pn->mn_name,
				pn->mn_type);
		}
		/* Stop when the caller's buffer cannot hold another entry. */
		if (entry.d_reclen > uio->uio_resid)
			break;
		/* Skip entries before the requested offset. */
		if (offset >= uio->uio_offset) {
			error = vfs_read_dirent(ap, &entry, offset);
			if (error)
				break;
		}
		offset += entry.d_reclen;
	}
	sx_xunlock(&mi->mi_lock);

	uio->uio_offset = offset;

	/* Restore the stashed ncookies pointer for the caller. */
	if (tmp_ncookies != NULL)
		ap->a_ncookies = tmp_ncookies;

	return (error);
}
1390
1391 #ifdef notyet
1392
1393 #if 0
1394 struct vop_mkdir_args {
1395 struct vnode *a_dvp;
1396 struvt vnode **a_vpp;
1397 struvt componentname *a_cnp;
1398 struct vattr *a_vap;
1399 };
1400 #endif
1401
1402 /*
1403 * Create a directory.
1404 */
1405 static int
1406 mqfs_mkdir(struct vop_mkdir_args *ap)
1407 {
1408 struct mqfs_info *mqfs = VFSTOMQFS(ap->a_dvp->v_mount);
1409 struct componentname *cnp = ap->a_cnp;
1410 struct mqfs_node *pd = VTON(ap->a_dvp);
1411 struct mqfs_node *pn;
1412 int error;
1413
1414 if (pd->mn_type != mqfstype_root && pd->mn_type != mqfstype_dir)
1415 return (ENOTDIR);
1416 sx_xlock(&mqfs->mi_lock);
1417 #if 0
1418 /* named node */
1419 pn = mqfs_search(pd, cnp->cn_nameptr, cnp->cn_namelen);
1420 if (pn != NULL) {
1421 sx_xunlock(&mqfs->mi_lock);
1422 return (EEXIST);
1423 }
1424 #else
1425 if ((cnp->cn_flags & HASBUF) == 0)
1426 panic("%s: no name", __func__);
1427 #endif
1428 pn = mqfs_create_dir(pd, cnp->cn_nameptr, cnp->cn_namelen,
1429 ap->a_vap->cn_cred, ap->a_vap->va_mode);
1430 if (pn == NULL)
1431 error = ENOSPC;
1432 else
1433 error = mqfs_allocv(ap->a_dvp->v_mount, ap->a_vpp, pn);
1434 sx_xunlock(&mqfs->mi_lock);
1435 return (error);
1436 }
1437
1438 #if 0
1439 struct vop_rmdir_args {
1440 struct vnode *a_dvp;
1441 struct vnode *a_vp;
1442 struct componentname *a_cnp;
1443 };
1444 #endif
1445
1446 /*
1447 * Remove a directory.
1448 */
1449 static int
1450 mqfs_rmdir(struct vop_rmdir_args *ap)
1451 {
1452 struct mqfs_info *mqfs = VFSTOMQFS(ap->a_dvp->v_mount);
1453 struct mqfs_node *pn = VTON(ap->a_vp);
1454 struct mqfs_node *pt;
1455
1456 if (pn->mn_type != mqfstype_dir)
1457 return (ENOTDIR);
1458
1459 sx_xlock(&mqfs->mi_lock);
1460 if (pn->mn_deleted) {
1461 sx_xunlock(&mqfs->mi_lock);
1462 return (ENOENT);
1463 }
1464
1465 pt = LIST_FIRST(&pn->mn_children);
1466 pt = LIST_NEXT(pt, mn_sibling);
1467 pt = LIST_NEXT(pt, mn_sibling);
1468 if (pt != NULL) {
1469 sx_xunlock(&mqfs->mi_lock);
1470 return (ENOTEMPTY);
1471 }
1472 pt = pn->mn_parent;
1473 pn->mn_parent = NULL;
1474 pn->mn_deleted = 1;
1475 LIST_REMOVE(pn, mn_sibling);
1476 mqnode_release(pn);
1477 mqnode_release(pt);
1478 sx_xunlock(&mqfs->mi_lock);
1479 cache_purge(ap->a_vp);
1480 return (0);
1481 }
1482
1483 #endif /* notyet */
1484
1485 /*
1486 * Allocate a message queue
1487 */
1488 static struct mqueue *
1489 mqueue_alloc(const struct mq_attr *attr)
1490 {
1491 struct mqueue *mq;
1492
1493 if (curmq >= maxmq)
1494 return (NULL);
1495 mq = uma_zalloc(mqueue_zone, M_WAITOK | M_ZERO);
1496 TAILQ_INIT(&mq->mq_msgq);
1497 if (attr != NULL) {
1498 mq->mq_maxmsg = attr->mq_maxmsg;
1499 mq->mq_msgsize = attr->mq_msgsize;
1500 } else {
1501 mq->mq_maxmsg = default_maxmsg;
1502 mq->mq_msgsize = default_msgsize;
1503 }
1504 mtx_init(&mq->mq_mutex, "mqueue", NULL, MTX_DEF);
1505 knlist_init_mtx(&mq->mq_rsel.si_note, &mq->mq_mutex);
1506 knlist_init_mtx(&mq->mq_wsel.si_note, &mq->mq_mutex);
1507 atomic_add_int(&curmq, 1);
1508 return (mq);
1509 }
1510
1511 /*
1512 * Destroy a message queue
1513 */
1514 static void
1515 mqueue_free(struct mqueue *mq)
1516 {
1517 struct mqueue_msg *msg;
1518
1519 while ((msg = TAILQ_FIRST(&mq->mq_msgq)) != NULL) {
1520 TAILQ_REMOVE(&mq->mq_msgq, msg, msg_link);
1521 FREE(msg, M_MQUEUEDATA);
1522 }
1523
1524 mtx_destroy(&mq->mq_mutex);
1525 knlist_destroy(&mq->mq_rsel.si_note);
1526 knlist_destroy(&mq->mq_wsel.si_note);
1527 uma_zfree(mqueue_zone, mq);
1528 atomic_add_int(&curmq, -1);
1529 }
1530
1531 /*
1532 * Load a message from user space
1533 */
1534 static struct mqueue_msg *
1535 mqueue_loadmsg(const char *msg_ptr, size_t msg_size, int msg_prio)
1536 {
1537 struct mqueue_msg *msg;
1538 size_t len;
1539 int error;
1540
1541 len = sizeof(struct mqueue_msg) + msg_size;
1542 MALLOC(msg, struct mqueue_msg *, len, M_MQUEUEDATA, M_WAITOK);
1543 error = copyin(msg_ptr, ((char *)msg) + sizeof(struct mqueue_msg),
1544 msg_size);
1545 if (error) {
1546 FREE(msg, M_MQUEUEDATA);
1547 msg = NULL;
1548 } else {
1549 msg->msg_size = msg_size;
1550 msg->msg_prio = msg_prio;
1551 }
1552 return (msg);
1553 }
1554
1555 /*
1556 * Save a message to user space
1557 */
1558 static int
1559 mqueue_savemsg(struct mqueue_msg *msg, char *msg_ptr, int *msg_prio)
1560 {
1561 int error;
1562
1563 error = copyout(((char *)msg) + sizeof(*msg), msg_ptr,
1564 msg->msg_size);
1565 if (error == 0 && msg_prio != NULL)
1566 error = copyout(&msg->msg_prio, msg_prio, sizeof(int));
1567 return (error);
1568 }
1569
1570 /*
1571 * Free a message's memory
1572 */
1573 static __inline void
1574 mqueue_freemsg(struct mqueue_msg *msg)
1575 {
1576 FREE(msg, M_MQUEUEDATA);
1577 }
1578
1579 /*
1580 * Send a message. if waitok is false, thread will not be
1581 * blocked if there is no data in queue, otherwise, absolute
1582 * time will be checked.
1583 */
1584 int
1585 mqueue_send(struct mqueue *mq, const char *msg_ptr,
1586 size_t msg_len, unsigned msg_prio, int waitok,
1587 const struct timespec *abs_timeout)
1588 {
1589 struct mqueue_msg *msg;
1590 struct timespec ets, ts, ts2;
1591 struct timeval tv;
1592 int error;
1593
1594 if (msg_prio >= MQ_PRIO_MAX)
1595 return (EINVAL);
1596 if (msg_len > mq->mq_msgsize)
1597 return (EMSGSIZE);
1598 msg = mqueue_loadmsg(msg_ptr, msg_len, msg_prio);
1599 if (msg == NULL)
1600 return (EFAULT);
1601
1602 /* O_NONBLOCK case */
1603 if (!waitok) {
1604 error = _mqueue_send(mq, msg, -1);
1605 if (error)
1606 goto bad;
1607 return (0);
1608 }
1609
1610 /* we allow a null timeout (wait forever) */
1611 if (abs_timeout == NULL) {
1612 error = _mqueue_send(mq, msg, 0);
1613 if (error)
1614 goto bad;
1615 return (0);
1616 }
1617
1618 /* send it before checking time */
1619 error = _mqueue_send(mq, msg, -1);
1620 if (error == 0)
1621 return (0);
1622
1623 if (error != EAGAIN)
1624 goto bad;
1625
1626 error = copyin(abs_timeout, &ets, sizeof(ets));
1627 if (error != 0)
1628 goto bad;
1629 if (ets.tv_nsec >= 1000000000 || ets.tv_nsec < 0) {
1630 error = EINVAL;
1631 goto bad;
1632 }
1633 for (;;) {
1634 ts2 = ets;
1635 getnanotime(&ts);
1636 timespecsub(&ts2, &ts);
1637 if (ts2.tv_sec < 0 || (ts2.tv_sec == 0 && ts2.tv_nsec <= 0)) {
1638 error = ETIMEDOUT;
1639 break;
1640 }
1641 TIMESPEC_TO_TIMEVAL(&tv, &ts2);
1642 error = _mqueue_send(mq, msg, tvtohz(&tv));
1643 if (error != ETIMEDOUT)
1644 break;
1645 }
1646 if (error == 0)
1647 return (0);
1648 bad:
1649 mqueue_freemsg(msg);
1650 return (error);
1651 }
1652
1653 /*
1654 * Common routine to send a message
1655 */
1656 static int
1657 _mqueue_send(struct mqueue *mq, struct mqueue_msg *msg, int timo)
1658 {
1659 struct mqueue_msg *msg2;
1660 int error = 0;
1661
1662 mtx_lock(&mq->mq_mutex);
1663 while (mq->mq_curmsgs >= mq->mq_maxmsg && error == 0) {
1664 if (timo < 0) {
1665 mtx_unlock(&mq->mq_mutex);
1666 return (EAGAIN);
1667 }
1668 mq->mq_senders++;
1669 error = msleep(&mq->mq_senders, &mq->mq_mutex,
1670 PCATCH, "mqsend", timo);
1671 mq->mq_senders--;
1672 if (error == EAGAIN)
1673 error = ETIMEDOUT;
1674 }
1675 if (mq->mq_curmsgs >= mq->mq_maxmsg) {
1676 mtx_unlock(&mq->mq_mutex);
1677 return (error);
1678 }
1679 error = 0;
1680 if (TAILQ_EMPTY(&mq->mq_msgq)) {
1681 TAILQ_INSERT_HEAD(&mq->mq_msgq, msg, msg_link);
1682 } else {
1683 if (msg->msg_prio <= TAILQ_LAST(&mq->mq_msgq, msgq)->msg_prio) {
1684 TAILQ_INSERT_TAIL(&mq->mq_msgq, msg, msg_link);
1685 } else {
1686 TAILQ_FOREACH(msg2, &mq->mq_msgq, msg_link) {
1687 if (msg2->msg_prio < msg->msg_prio)
1688 break;
1689 }
1690 TAILQ_INSERT_BEFORE(msg2, msg, msg_link);
1691 }
1692 }
1693 mq->mq_curmsgs++;
1694 mq->mq_totalbytes += msg->msg_size;
1695 if (mq->mq_receivers)
1696 wakeup_one(&mq->mq_receivers);
1697 else if (mq->mq_notifier != NULL)
1698 mqueue_send_notification(mq);
1699 if (mq->mq_flags & MQ_RSEL) {
1700 mq->mq_flags &= ~MQ_RSEL;
1701 selwakeup(&mq->mq_rsel);
1702 }
1703 KNOTE_LOCKED(&mq->mq_rsel.si_note, 0);
1704 mtx_unlock(&mq->mq_mutex);
1705 return (0);
1706 }
1707
1708 /*
1709 * Send realtime a signal to process which registered itself
1710 * successfully by mq_notify.
1711 */
1712 static void
1713 mqueue_send_notification(struct mqueue *mq)
1714 {
1715 struct mqueue_notifier *nt;
1716 struct proc *p;
1717
1718 mtx_assert(&mq->mq_mutex, MA_OWNED);
1719 nt = mq->mq_notifier;
1720 if (nt->nt_sigev.sigev_notify != SIGEV_NONE) {
1721 p = nt->nt_proc;
1722 PROC_LOCK(p);
1723 if (!KSI_ONQ(&nt->nt_ksi))
1724 psignal_event(p, &nt->nt_sigev, &nt->nt_ksi);
1725 PROC_UNLOCK(p);
1726 }
1727 mq->mq_notifier = NULL;
1728 }
1729
1730 /*
1731 * Get a message. if waitok is false, thread will not be
1732 * blocked if there is no data in queue, otherwise, absolute
1733 * time will be checked.
1734 */
1735 int
1736 mqueue_receive(struct mqueue *mq, char *msg_ptr,
1737 size_t msg_len, unsigned *msg_prio, int waitok,
1738 const struct timespec *abs_timeout)
1739 {
1740 struct mqueue_msg *msg;
1741 struct timespec ets, ts, ts2;
1742 struct timeval tv;
1743 int error;
1744
1745 if (msg_len < mq->mq_msgsize)
1746 return (EMSGSIZE);
1747
1748 /* O_NONBLOCK case */
1749 if (!waitok) {
1750 error = _mqueue_recv(mq, &msg, -1);
1751 if (error)
1752 return (error);
1753 goto received;
1754 }
1755
1756 /* we allow a null timeout (wait forever). */
1757 if (abs_timeout == NULL) {
1758 error = _mqueue_recv(mq, &msg, 0);
1759 if (error)
1760 return (error);
1761 goto received;
1762 }
1763
1764 /* try to get a message before checking time */
1765 error = _mqueue_recv(mq, &msg, -1);
1766 if (error == 0)
1767 goto received;
1768
1769 if (error != EAGAIN)
1770 return (error);
1771
1772 error = copyin(abs_timeout, &ets, sizeof(ets));
1773 if (error != 0)
1774 return (error);
1775 if (ets.tv_nsec >= 1000000000 || ets.tv_nsec < 0) {
1776 error = EINVAL;
1777 return (error);
1778 }
1779
1780 for (;;) {
1781 ts2 = ets;
1782 getnanotime(&ts);
1783 timespecsub(&ts2, &ts);
1784 if (ts2.tv_sec < 0 || (ts2.tv_sec == 0 && ts2.tv_nsec <= 0)) {
1785 error = ETIMEDOUT;
1786 return (error);
1787 }
1788 TIMESPEC_TO_TIMEVAL(&tv, &ts2);
1789 error = _mqueue_recv(mq, &msg, tvtohz(&tv));
1790 if (error == 0)
1791 break;
1792 if (error != ETIMEDOUT)
1793 return (error);
1794 }
1795
1796 received:
1797 error = mqueue_savemsg(msg, msg_ptr, msg_prio);
1798 if (error == 0) {
1799 curthread->td_retval[0] = msg->msg_size;
1800 curthread->td_retval[1] = 0;
1801 }
1802 mqueue_freemsg(msg);
1803 return (error);
1804 }
1805
1806 /*
1807 * Common routine to receive a message
1808 */
1809 static int
1810 _mqueue_recv(struct mqueue *mq, struct mqueue_msg **msg, int timo)
1811 {
1812 int error = 0;
1813
1814 mtx_lock(&mq->mq_mutex);
1815 while ((*msg = TAILQ_FIRST(&mq->mq_msgq)) == NULL && error == 0) {
1816 if (timo < 0) {
1817 mtx_unlock(&mq->mq_mutex);
1818 return (EAGAIN);
1819 }
1820 mq->mq_receivers++;
1821 error = msleep(&mq->mq_receivers, &mq->mq_mutex,
1822 PCATCH, "mqrecv", timo);
1823 mq->mq_receivers--;
1824 if (error == EAGAIN)
1825 error = ETIMEDOUT;
1826 }
1827 if (*msg != NULL) {
1828 error = 0;
1829 TAILQ_REMOVE(&mq->mq_msgq, *msg, msg_link);
1830 mq->mq_curmsgs--;
1831 mq->mq_totalbytes -= (*msg)->msg_size;
1832 if (mq->mq_senders)
1833 wakeup_one(&mq->mq_senders);
1834 if (mq->mq_flags & MQ_WSEL) {
1835 mq->mq_flags &= ~MQ_WSEL;
1836 selwakeup(&mq->mq_wsel);
1837 }
1838 KNOTE_LOCKED(&mq->mq_wsel.si_note, 0);
1839 }
1840 if (mq->mq_notifier != NULL && mq->mq_receivers == 0 &&
1841 !TAILQ_EMPTY(&mq->mq_msgq)) {
1842 mqueue_send_notification(mq);
1843 }
1844 mtx_unlock(&mq->mq_mutex);
1845 return (error);
1846 }
1847
1848 static __inline struct mqueue_notifier *
1849 notifier_alloc(void)
1850 {
1851 return (uma_zalloc(mqnoti_zone, M_WAITOK | M_ZERO));
1852 }
1853
1854 static __inline void
1855 notifier_free(struct mqueue_notifier *p)
1856 {
1857 uma_zfree(mqnoti_zone, p);
1858 }
1859
1860 static struct mqueue_notifier *
1861 notifier_search(struct proc *p, int fd)
1862 {
1863 struct mqueue_notifier *nt;
1864
1865 LIST_FOREACH(nt, &p->p_mqnotifier, nt_link) {
1866 if (nt->nt_ksi.ksi_mqd == fd)
1867 break;
1868 }
1869 return (nt);
1870 }
1871
1872 static __inline void
1873 notifier_insert(struct proc *p, struct mqueue_notifier *nt)
1874 {
1875 LIST_INSERT_HEAD(&p->p_mqnotifier, nt, nt_link);
1876 }
1877
1878 static __inline void
1879 notifier_delete(struct proc *p, struct mqueue_notifier *nt)
1880 {
1881 LIST_REMOVE(nt, nt_link);
1882 notifier_free(nt);
1883 }
1884
/*
 * Drop the notification registered by process p on descriptor fd,
 * cancelling any pending signal.  Called with the queue mutex held;
 * takes the process lock internally.
 */
static void
notifier_remove(struct proc *p, struct mqueue *mq, int fd)
{
	struct mqueue_notifier *nt;

	mtx_assert(&mq->mq_mutex, MA_OWNED);
	PROC_LOCK(p);
	nt = notifier_search(p, fd);
	if (nt != NULL) {
		/* Detach from the queue if this fd holds the registration. */
		if (mq->mq_notifier == nt)
			mq->mq_notifier = NULL;
		/* Withdraw any signal still sitting on the queue. */
		sigqueue_take(&nt->nt_ksi);
		notifier_delete(p, nt);
	}
	PROC_UNLOCK(p);
}
1901
1902 /*
1903 * Syscall to open a message queue.
1904 */
1905 int
1906 kmq_open(struct thread *td, struct kmq_open_args *uap)
1907 {
1908 char path[MQFS_NAMELEN + 1];
1909 struct mq_attr attr, *pattr;
1910 struct mqfs_node *pn;
1911 struct filedesc *fdp;
1912 struct file *fp;
1913 struct mqueue *mq;
1914 int fd, error, len, flags, cmode;
1915
1916 if ((uap->flags & O_ACCMODE) == O_ACCMODE)
1917 return (EINVAL);
1918
1919 fdp = td->td_proc->p_fd;
1920 flags = FFLAGS(uap->flags);
1921 cmode = (((uap->mode & ~fdp->fd_cmask) & ALLPERMS) & ~S_ISTXT);
1922 mq = NULL;
1923 if ((flags & O_CREAT) && (uap->attr != NULL)) {
1924 error = copyin(uap->attr, &attr, sizeof(attr));
1925 if (error)
1926 return (error);
1927 if (attr.mq_maxmsg <= 0 || attr.mq_maxmsg > maxmsg)
1928 return (EINVAL);
1929 if (attr.mq_msgsize <= 0 || attr.mq_msgsize > maxmsgsize)
1930 return (EINVAL);
1931 pattr = &attr;
1932 } else
1933 pattr = NULL;
1934
1935 error = copyinstr(uap->path, path, MQFS_NAMELEN + 1, NULL);
1936 if (error)
1937 return (error);
1938
1939 /*
1940 * The first character of name must be a slash (/) character
1941 * and the remaining characters of name cannot include any slash
1942 * characters.
1943 */
1944 len = strlen(path);
1945 if (len < 2 || path[0] != '/' || index(path + 1, '/') != NULL)
1946 return (EINVAL);
1947
1948 error = falloc(td, &fp, &fd);
1949 if (error)
1950 return (error);
1951
1952 sx_xlock(&mqfs_data.mi_lock);
1953 pn = mqfs_search(mqfs_data.mi_root, path + 1, len - 1);
1954 if (pn == NULL) {
1955 if (!(flags & O_CREAT)) {
1956 error = ENOENT;
1957 } else {
1958 mq = mqueue_alloc(pattr);
1959 if (mq == NULL) {
1960 error = ENFILE;
1961 } else {
1962 pn = mqfs_create_file(mqfs_data.mi_root,
1963 path + 1, len - 1, td->td_ucred,
1964 cmode);
1965 if (pn == NULL) {
1966 error = ENOSPC;
1967 mqueue_free(mq);
1968 }
1969 }
1970 }
1971
1972 if (error == 0) {
1973 pn->mn_data = mq;
1974 }
1975 } else {
1976 if ((flags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL)) {
1977 error = EEXIST;
1978 } else {
1979 int acc_mode = 0;
1980
1981 if (flags & FREAD)
1982 acc_mode |= VREAD;
1983 if (flags & FWRITE)
1984 acc_mode |= VWRITE;
1985 error = vaccess(VREG, pn->mn_mode, pn->mn_uid,
1986 pn->mn_gid, acc_mode, td->td_ucred, NULL);
1987 }
1988 }
1989
1990 if (error) {
1991 sx_xunlock(&mqfs_data.mi_lock);
1992 fdclose(fdp, fp, fd, td);
1993 fdrop(fp, td);
1994 return (error);
1995 }
1996
1997 mqnode_addref(pn);
1998 sx_xunlock(&mqfs_data.mi_lock);
1999
2000 FILE_LOCK(fp);
2001 fp->f_flag = (flags & (FREAD | FWRITE | O_NONBLOCK));
2002 fp->f_type = DTYPE_MQUEUE;
2003 fp->f_data = pn;
2004 fp->f_ops = &mqueueops;
2005 FILE_UNLOCK(fp);
2006
2007 FILEDESC_XLOCK(fdp);
2008 if (fdp->fd_ofiles[fd] == fp)
2009 fdp->fd_ofileflags[fd] |= UF_EXCLOSE;
2010 FILEDESC_XUNLOCK(fdp);
2011 td->td_retval[0] = fd;
2012 fdrop(fp, td);
2013 return (0);
2014 }
2015
2016 /*
2017 * Syscall to unlink a message queue.
2018 */
2019 int
2020 kmq_unlink(struct thread *td, struct kmq_unlink_args *uap)
2021 {
2022 char path[MQFS_NAMELEN+1];
2023 struct mqfs_node *pn;
2024 int error, len;
2025
2026 error = copyinstr(uap->path, path, MQFS_NAMELEN + 1, NULL);
2027 if (error)
2028 return (error);
2029
2030 len = strlen(path);
2031 if (len < 2 || path[0] != '/' || index(path + 1, '/') != NULL)
2032 return (EINVAL);
2033
2034 sx_xlock(&mqfs_data.mi_lock);
2035 pn = mqfs_search(mqfs_data.mi_root, path + 1, len - 1);
2036 if (pn != NULL)
2037 error = do_unlink(pn, td->td_ucred);
2038 else
2039 error = ENOENT;
2040 sx_xunlock(&mqfs_data.mi_lock);
2041 return (error);
2042 }
2043
/* fget-style lookup function: thread, fd, out file pointer. */
typedef int (*_fgetf)(struct thread *, int, struct file **);

/*
 * Get message queue by giving file slot
 *
 * Resolves fd through the supplied fget variant, verifies the file is
 * a message queue, and optionally returns the node and/or queue.  On
 * success the caller owns a reference on *fpp and must fdrop() it.
 */
static int
_getmq(struct thread *td, int fd, _fgetf func,
	   struct file **fpp, struct mqfs_node **ppn, struct mqueue **pmq)
{
	struct mqfs_node *pn;
	int error;

	error = func(td, fd, fpp);
	if (error)
		return (error);
	/* Not one of ours: drop the reference and fail. */
	if (&mqueueops != (*fpp)->f_ops) {
		fdrop(*fpp, td);
		return (EBADF);
	}
	pn = (*fpp)->f_data;
	if (ppn)
		*ppn = pn;
	if (pmq)
		*pmq = pn->mn_data;
	return (0);
}
2070
2071 static __inline int
2072 getmq(struct thread *td, int fd, struct file **fpp, struct mqfs_node **ppn,
2073 struct mqueue **pmq)
2074 {
2075 return _getmq(td, fd, fget, fpp, ppn, pmq);
2076 }
2077
2078 static __inline int
2079 getmq_read(struct thread *td, int fd, struct file **fpp,
2080 struct mqfs_node **ppn, struct mqueue **pmq)
2081 {
2082 return _getmq(td, fd, fget_read, fpp, ppn, pmq);
2083 }
2084
2085 static __inline int
2086 getmq_write(struct thread *td, int fd, struct file **fpp,
2087 struct mqfs_node **ppn, struct mqueue **pmq)
2088 {
2089 return _getmq(td, fd, fget_write, fpp, ppn, pmq);
2090 }
2091
/*
 * Syscall: mq_setattr()/mq_getattr() back end.
 *
 * Only the O_NONBLOCK flag is settable; maxmsg/msgsize/curmsgs are
 * reported back in oattr but cannot be changed.  The previous
 * attributes are copied out when uap->oattr is non-NULL.
 */
int
kmq_setattr(struct thread *td, struct kmq_setattr_args *uap)
{
	struct mqueue *mq;
	struct file *fp;
	struct mq_attr attr, oattr;
	int error;

	if (uap->attr) {
		error = copyin(uap->attr, &attr, sizeof(attr));
		if (error)
			return (error);
		/* O_NONBLOCK is the only flag mq_setattr may change. */
		if (attr.mq_flags & ~O_NONBLOCK)
			return (EINVAL);
	}
	error = getmq(td, uap->mqd, &fp, NULL, &mq);
	if (error)
		return (error);
	oattr.mq_maxmsg = mq->mq_maxmsg;
	oattr.mq_msgsize = mq->mq_msgsize;
	oattr.mq_curmsgs = mq->mq_curmsgs;
	FILE_LOCK(fp);
	/* Old flags reflect the file's current blocking mode. */
	oattr.mq_flags = (O_NONBLOCK & fp->f_flag);
	if (uap->attr) {
		fp->f_flag &= ~O_NONBLOCK;
		fp->f_flag |= (attr.mq_flags & O_NONBLOCK);
	}
	FILE_UNLOCK(fp);
	fdrop(fp, td);
	if (uap->oattr)
		error = copyout(&oattr, uap->oattr, sizeof(oattr));
	return (error);
}
2125
2126 int
2127 kmq_timedreceive(struct thread *td, struct kmq_timedreceive_args *uap)
2128 {
2129 struct mqueue *mq;
2130 struct file *fp;
2131 int error;
2132 int waitok;
2133
2134 error = getmq_read(td, uap->mqd, &fp, NULL, &mq);
2135 if (error)
2136 return (error);
2137 waitok = !(fp->f_flag & O_NONBLOCK);
2138 error = mqueue_receive(mq, uap->msg_ptr, uap->msg_len,
2139 uap->msg_prio, waitok, uap->abs_timeout);
2140 fdrop(fp, td);
2141 return (error);
2142 }
2143
2144 int
2145 kmq_timedsend(struct thread *td, struct kmq_timedsend_args *uap)
2146 {
2147 struct mqueue *mq;
2148 struct file *fp;
2149 int error, waitok;
2150
2151 error = getmq_write(td, uap->mqd, &fp, NULL, &mq);
2152 if (error)
2153 return (error);
2154 waitok = !(fp->f_flag & O_NONBLOCK);
2155 error = mqueue_send(mq, uap->msg_ptr, uap->msg_len,
2156 uap->msg_prio, waitok, uap->abs_timeout);
2157 fdrop(fp, td);
2158 return (error);
2159 }
2160
/*
 * Syscall: mq_notify() back end.
 *
 * With a sigevent, registers this process for async notification on
 * the queue (EBUSY if another registration exists); with NULL,
 * removes this fd's registration.  Because notifier_alloc() may
 * sleep, allocation is done with all locks dropped and the lookup is
 * retried ("again:") — including re-verifying that uap->mqd still
 * refers to the same file.
 */
int
kmq_notify(struct thread *td, struct kmq_notify_args *uap)
{
	struct sigevent ev;
	struct filedesc *fdp;
	struct proc *p;
	struct mqueue *mq;
	struct file *fp;
	struct mqueue_notifier *nt, *newnt = NULL;
	int error;

	p = td->td_proc;
	fdp = td->td_proc->p_fd;
	if (uap->sigev) {
		error = copyin(uap->sigev, &ev, sizeof(ev));
		if (error)
			return (error);
		if (ev.sigev_notify != SIGEV_SIGNAL &&
		    ev.sigev_notify != SIGEV_THREAD_ID &&
		    ev.sigev_notify != SIGEV_NONE)
			return (EINVAL);
		/* Signal-based notification needs a valid signal number. */
		if ((ev.sigev_notify == SIGEV_SIGNAL ||
		     ev.sigev_notify == SIGEV_THREAD_ID) &&
			!_SIG_VALID(ev.sigev_signo))
			return (EINVAL);
	}
	error = getmq(td, uap->mqd, &fp, NULL, &mq);
	if (error)
		return (error);
again:
	FILEDESC_SLOCK(fdp);
	/* The fd may have been closed/replaced while we slept. */
	if (fget_locked(fdp, uap->mqd) != fp) {
		FILEDESC_SUNLOCK(fdp);
		error = EBADF;
		goto out;
	}
	mtx_lock(&mq->mq_mutex);
	FILEDESC_SUNLOCK(fdp);
	if (uap->sigev != NULL) {
		if (mq->mq_notifier != NULL) {
			error = EBUSY;
		} else {
			PROC_LOCK(p);
			nt = notifier_search(p, uap->mqd);
			if (nt == NULL) {
				/*
				 * Allocation may sleep: drop the locks,
				 * allocate, and restart the lookup.
				 */
				if (newnt == NULL) {
					PROC_UNLOCK(p);
					mtx_unlock(&mq->mq_mutex);
					newnt = notifier_alloc();
					goto again;
				}
			}

			if (nt != NULL) {
				/*
				 * Re-registration on the same fd: cancel any
				 * pending signal and discard the spare.
				 */
				sigqueue_take(&nt->nt_ksi);
				if (newnt != NULL) {
					notifier_free(newnt);
					newnt = NULL;
				}
			} else {
				/* Install the pre-allocated notifier. */
				nt = newnt;
				newnt = NULL;
				ksiginfo_init(&nt->nt_ksi);
				nt->nt_ksi.ksi_flags |= KSI_INS | KSI_EXT;
				nt->nt_ksi.ksi_code = SI_MESGQ;
				nt->nt_proc = p;
				nt->nt_ksi.ksi_mqd = uap->mqd;
				notifier_insert(p, nt);
			}
			nt->nt_sigev = ev;
			mq->mq_notifier = nt;
			PROC_UNLOCK(p);
			/*
			 * if there is no receivers and message queue
			 * is not empty, we should send notification
			 * as soon as possible.
			 */
			if (mq->mq_receivers == 0 &&
			    !TAILQ_EMPTY(&mq->mq_msgq))
				mqueue_send_notification(mq);
		}
	} else {
		/* NULL sigevent: remove this fd's registration. */
		notifier_remove(p, mq, uap->mqd);
	}
	mtx_unlock(&mq->mq_mutex);

out:
	fdrop(fp, td);
	/* Free the spare if the retry path never consumed it. */
	if (newnt != NULL)
		notifier_free(newnt);
	return (error);
}
2253
/*
 * fdclose hook: called (with the filedesc lock held) when a message
 * queue descriptor is closed.  Drops any notification registered on
 * this fd and wakes up select/poll waiters in the same process.
 */
static void
mqueue_fdclose(struct thread *td, int fd, struct file *fp)
{
	struct filedesc *fdp;
	struct mqueue *mq;

	fdp = td->td_proc->p_fd;
	FILEDESC_LOCK_ASSERT(fdp);

	if (fp->f_ops == &mqueueops) {
		mq = FPTOMQ(fp);
		mtx_lock(&mq->mq_mutex);
		notifier_remove(td->td_proc, mq, fd);

		/* have to wakeup thread in same process */
		if (mq->mq_flags & MQ_RSEL) {
			mq->mq_flags &= ~MQ_RSEL;
			selwakeup(&mq->mq_rsel);
		}
		if (mq->mq_flags & MQ_WSEL) {
			mq->mq_flags &= ~MQ_WSEL;
			selwakeup(&mq->mq_wsel);
		}
		mtx_unlock(&mq->mq_mutex);
	}
}
2280
2281 static void
2282 mq_proc_exit(void *arg __unused, struct proc *p)
2283 {
2284 struct filedesc *fdp;
2285 struct file *fp;
2286 struct mqueue *mq;
2287 int i;
2288
2289 fdp = p->p_fd;
2290 FILEDESC_SLOCK(fdp);
2291 for (i = 0; i < fdp->fd_nfiles; ++i) {
2292 fp = fget_locked(fdp, i);
2293 if (fp != NULL && fp->f_ops == &mqueueops) {
2294 mq = FPTOMQ(fp);
2295 mtx_lock(&mq->mq_mutex);
2296 notifier_remove(p, FPTOMQ(fp), i);
2297 mtx_unlock(&mq->mq_mutex);
2298 }
2299 }
2300 FILEDESC_SUNLOCK(fdp);
2301 KASSERT(LIST_EMPTY(&p->p_mqnotifier), ("mq notifiers left"));
2302 }
2303
2304 static int
2305 mqf_read(struct file *fp, struct uio *uio, struct ucred *active_cred,
2306 int flags, struct thread *td)
2307 {
2308 return (EOPNOTSUPP);
2309 }
2310
2311 static int
2312 mqf_write(struct file *fp, struct uio *uio, struct ucred *active_cred,
2313 int flags, struct thread *td)
2314 {
2315 return (EOPNOTSUPP);
2316 }
2317
2318 static int
2319 mqf_ioctl(struct file *fp, u_long cmd, void *data,
2320 struct ucred *active_cred, struct thread *td)
2321 {
2322 return (ENOTTY);
2323 }
2324
/*
 * fo_poll: a queue is readable when it holds at least one message and
 * writable when it has room for another.  When a requested condition
 * is not ready, record the selector and flag the queue so the next
 * state change issues a selwakeup().
 */
static int
mqf_poll(struct file *fp, int events, struct ucred *active_cred,
	struct thread *td)
{
	struct mqueue *mq = FPTOMQ(fp);
	int revents = 0;

	mtx_lock(&mq->mq_mutex);
	if (events & (POLLIN | POLLRDNORM)) {
		if (mq->mq_curmsgs) {
			revents |= events & (POLLIN | POLLRDNORM);
		} else {
			mq->mq_flags |= MQ_RSEL;
			selrecord(td, &mq->mq_rsel);
		}
	}
	if (events & POLLOUT) {
		if (mq->mq_curmsgs < mq->mq_maxmsg)
			revents |= POLLOUT;
		else {
			mq->mq_flags |= MQ_WSEL;
			selrecord(td, &mq->mq_wsel);
		}
	}
	mtx_unlock(&mq->mq_mutex);
	return (revents);
}
2352
/*
 * fo_close: detach the file from its mqfs node and drop the node
 * reference taken at open time.  f_ops is switched to badfileops
 * before f_data is cleared so concurrent users fail cleanly.
 */
static int
mqf_close(struct file *fp, struct thread *td)
{
	struct mqfs_node *pn;

	fp->f_ops = &badfileops;
	pn = fp->f_data;
	fp->f_data = NULL;
	sx_xlock(&mqfs_data.mi_lock);
	mqnode_release(pn);
	sx_xunlock(&mqfs_data.mi_lock);
	return (0);
}
2366
2367 static int
2368 mqf_stat(struct file *fp, struct stat *st, struct ucred *active_cred,
2369 struct thread *td)
2370 {
2371 struct mqfs_node *pn = fp->f_data;
2372
2373 bzero(st, sizeof *st);
2374 st->st_atimespec = pn->mn_atime;
2375 st->st_mtimespec = pn->mn_mtime;
2376 st->st_ctimespec = pn->mn_ctime;
2377 st->st_birthtimespec = pn->mn_birth;
2378 st->st_uid = pn->mn_uid;
2379 st->st_gid = pn->mn_gid;
2380 st->st_mode = S_IFIFO | pn->mn_mode;
2381 return (0);
2382 }
2383
2384 static int
2385 mqf_kqfilter(struct file *fp, struct knote *kn)
2386 {
2387 struct mqueue *mq = FPTOMQ(fp);
2388 int error = 0;
2389
2390 if (kn->kn_filter == EVFILT_READ) {
2391 kn->kn_fop = &mq_rfiltops;
2392 knlist_add(&mq->mq_rsel.si_note, kn, 0);
2393 } else if (kn->kn_filter == EVFILT_WRITE) {
2394 kn->kn_fop = &mq_wfiltops;
2395 knlist_add(&mq->mq_wsel.si_note, kn, 0);
2396 } else
2397 error = EINVAL;
2398 return (error);
2399 }
2400
2401 static void
2402 filt_mqdetach(struct knote *kn)
2403 {
2404 struct mqueue *mq = FPTOMQ(kn->kn_fp);
2405
2406 if (kn->kn_filter == EVFILT_READ)
2407 knlist_remove(&mq->mq_rsel.si_note, kn, 0);
2408 else if (kn->kn_filter == EVFILT_WRITE)
2409 knlist_remove(&mq->mq_wsel.si_note, kn, 0);
2410 else
2411 panic("filt_mqdetach");
2412 }
2413
2414 static int
2415 filt_mqread(struct knote *kn, long hint)
2416 {
2417 struct mqueue *mq = FPTOMQ(kn->kn_fp);
2418
2419 mtx_assert(&mq->mq_mutex, MA_OWNED);
2420 return (mq->mq_curmsgs != 0);
2421 }
2422
2423 static int
2424 filt_mqwrite(struct knote *kn, long hint)
2425 {
2426 struct mqueue *mq = FPTOMQ(kn->kn_fp);
2427
2428 mtx_assert(&mq->mq_mutex, MA_OWNED);
2429 return (mq->mq_curmsgs < mq->mq_maxmsg);
2430 }
2431
/* File operations vector for DTYPE_MQUEUE descriptors. */
static struct fileops mqueueops = {
	.fo_read		= mqf_read,
	.fo_write		= mqf_write,
	.fo_ioctl		= mqf_ioctl,
	.fo_poll		= mqf_poll,
	.fo_kqfilter		= mqf_kqfilter,
	.fo_stat		= mqf_stat,
	.fo_close		= mqf_close
};
2441
/*
 * Vnode operations for mqueuefs.  Write, mkdir, and rmdir are
 * deliberately unsupported (see the #ifdef notyet blocks above).
 */
static struct vop_vector mqfs_vnodeops = {
	.vop_default 		= &default_vnodeops,
	.vop_access		= mqfs_access,
	.vop_cachedlookup	= mqfs_lookup,
	.vop_lookup		= vfs_cache_lookup,
	.vop_reclaim		= mqfs_reclaim,
	.vop_create		= mqfs_create,
	.vop_remove		= mqfs_remove,
	.vop_inactive		= mqfs_inactive,
	.vop_open		= mqfs_open,
	.vop_close		= mqfs_close,
	.vop_getattr		= mqfs_getattr,
	.vop_setattr		= mqfs_setattr,
	.vop_read		= mqfs_read,
	.vop_write		= VOP_EOPNOTSUPP,
	.vop_readdir		= mqfs_readdir,
	.vop_mkdir		= VOP_EOPNOTSUPP,
	.vop_rmdir		= VOP_EOPNOTSUPP
};
2461
/* Filesystem operations for mounting/unmounting mqueuefs. */
static struct vfsops mqfs_vfsops = {
	.vfs_init 		= mqfs_init,
	.vfs_uninit		= mqfs_uninit,
	.vfs_mount		= mqfs_mount,
	.vfs_unmount		= mqfs_unmount,
	.vfs_root		= mqfs_root,
	.vfs_statfs		= mqfs_statfs,
};
2470
/* Register the mq_* syscalls provided by this module. */
SYSCALL_MODULE_HELPER(kmq_open);
SYSCALL_MODULE_HELPER(kmq_setattr);
SYSCALL_MODULE_HELPER(kmq_timedsend);
SYSCALL_MODULE_HELPER(kmq_timedreceive);
SYSCALL_MODULE_HELPER(kmq_notify);
SYSCALL_MODULE_HELPER(kmq_unlink);

/* Register the synthetic mqueuefs filesystem and module version. */
VFS_SET(mqfs_vfsops, mqueuefs, VFCF_SYNTHETIC);
MODULE_VERSION(mqueuefs, 1);
/* Cache object: 2469855a96d388d0f9b9b31ef147c818 */