File:    modules/mqueue/../../kern/uipc_mqueue.c
Warning: line 2766, column 11: Copies out a struct with untouched element(s): __reserved
1 | /*- | |||
2 | * Copyright (c) 2005 David Xu <[email protected]> | |||
3 | * All rights reserved. | |||
4 | * | |||
5 | * Redistribution and use in source and binary forms, with or without | |||
6 | * modification, are permitted provided that the following conditions | |||
7 | * are met: | |||
8 | * 1. Redistributions of source code must retain the above copyright | |||
9 | * notice, this list of conditions and the following disclaimer. | |||
10 | * 2. Redistributions in binary form must reproduce the above copyright | |||
11 | * notice, this list of conditions and the following disclaimer in the | |||
12 | * documentation and/or other materials provided with the distribution. | |||
13 | * | |||
14 | * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND | |||
15 | * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE | |||
16 | * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE | |||
17 | * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE | |||
18 | * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL | |||
19 | * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS | |||
20 | * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) | |||
21 | * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT | |||
22 | * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY | |||
23 | * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF | |||
24 | * SUCH DAMAGE. | |||
25 | * | |||
26 | */ | |||
27 | ||||
28 | /* | |||
29 | * POSIX message queue implementation. | |||
30 | * | |||
31 | * 1) A mqueue filesystem can be mounted; each message queue then appears | |||
32 | * in the mounted directory, where the user can change a queue's | |||
33 | * permissions and ownership or remove a queue. Manually creating a file | |||
34 | * in the directory creates a kernel message queue with the same name and | |||
35 | * the default attributes; this method is not recommended, since the | |||
36 | * mq_open() syscall allows different attributes to be specified. The | |||
37 | * file system can also be mounted multiple times at different mount | |||
38 | * points, but every mount shows the same contents. | |||
39 | * | |||
40 | * 2) Standard POSIX message queue API. The syscalls do not go through the | |||
41 | * VFS layer but operate directly on the internal data structures, so the | |||
42 | * IPC facility can be used without mounting the mqueue file system. | |||
43 | */ | |||
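/*
 * Illustrative userland sketch (not part of this file): the syscall API in
 * (2) is what applications normally use.  Assuming the standard POSIX
 * <mqueue.h> interface, creating a queue and sending one message looks
 * roughly like this:
 *
 *	#include <fcntl.h>
 *	#include <mqueue.h>
 *
 *	struct mq_attr attr = { .mq_maxmsg = 10, .mq_msgsize = 128 };
 *	mqd_t mqd = mq_open("/myqueue", O_CREAT | O_WRONLY, 0600, &attr);
 *	if (mqd != (mqd_t)-1) {
 *		(void)mq_send(mqd, "hello", 5, 0);	-- priority 0
 *		(void)mq_close(mqd);
 *	}
 *
 * For interface (1), mounting mqueuefs (e.g. "mount -t mqueuefs null
 * /mnt/mqueue"; the exact invocation is an assumption here, see
 * mqueuefs(5)) makes the same queue visible as /mnt/mqueue/myqueue, where
 * it can be chmod(1)'ed or removed with rm(1).
 */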
44 | ||||
45 | #include <sys/cdefs.h> | |||
46 | __FBSDID("$FreeBSD: releng/11.0/sys/kern/uipc_mqueue.c 298567 2016-04-25 04:36:54Z jamie $")__asm__(".ident\t\"" "$FreeBSD: releng/11.0/sys/kern/uipc_mqueue.c 298567 2016-04-25 04:36:54Z jamie $" "\""); | |||
47 | ||||
48 | #include "opt_capsicum.h" | |||
49 | #include "opt_compat.h" | |||
50 | ||||
51 | #include <sys/param.h> | |||
52 | #include <sys/kernel.h> | |||
53 | #include <sys/systm.h> | |||
54 | #include <sys/limits.h> | |||
55 | #include <sys/malloc.h> | |||
56 | #include <sys/buf.h> | |||
57 | #include <sys/capsicum.h> | |||
58 | #include <sys/dirent.h> | |||
59 | #include <sys/event.h> | |||
60 | #include <sys/eventhandler.h> | |||
61 | #include <sys/fcntl.h> | |||
62 | #include <sys/file.h> | |||
63 | #include <sys/filedesc.h> | |||
64 | #include <sys/jail.h> | |||
65 | #include <sys/lock.h> | |||
66 | #include <sys/module.h> | |||
67 | #include <sys/mount.h> | |||
68 | #include <sys/mqueue.h> | |||
69 | #include <sys/mutex.h> | |||
70 | #include <sys/namei.h> | |||
71 | #include <sys/posix4.h> | |||
72 | #include <sys/poll.h> | |||
73 | #include <sys/priv.h> | |||
74 | #include <sys/proc.h> | |||
75 | #include <sys/queue.h> | |||
76 | #include <sys/sysproto.h> | |||
77 | #include <sys/stat.h> | |||
78 | #include <sys/syscall.h> | |||
79 | #include <sys/syscallsubr.h> | |||
80 | #include <sys/sysent.h> | |||
81 | #include <sys/sx.h> | |||
82 | #include <sys/sysctl.h> | |||
83 | #include <sys/taskqueue.h> | |||
84 | #include <sys/unistd.h> | |||
85 | #include <sys/user.h> | |||
86 | #include <sys/vnode.h> | |||
87 | #include <machine/atomic.h> | |||
88 | ||||
89 | FEATURE(p1003_1b_mqueue, "POSIX P1003.1B message queues support"); | |||
90 | ||||
91 | /* | |||
92 | * Limits and constants | |||
93 | */ | |||
94 | #define MQFS_NAMELEN NAME_MAX | |||
95 | #define MQFS_DELEN (8 + MQFS_NAMELEN) | |||
96 | ||||
97 | /* node types */ | |||
98 | typedef enum { | |||
99 | mqfstype_none = 0, | |||
100 | mqfstype_root, | |||
101 | mqfstype_dir, | |||
102 | mqfstype_this, | |||
103 | mqfstype_parent, | |||
104 | mqfstype_file, | |||
105 | mqfstype_symlink, | |||
106 | } mqfs_type_t; | |||
107 | ||||
108 | struct mqfs_node; | |||
109 | ||||
110 | /* | |||
111 | * mqfs_info: describes a mqfs instance | |||
112 | */ | |||
113 | struct mqfs_info { | |||
114 | struct sx mi_lock; | |||
115 | struct mqfs_node *mi_root; | |||
116 | struct unrhdr *mi_unrhdr; | |||
117 | }; | |||
118 | ||||
119 | struct mqfs_vdata { | |||
120 | LIST_ENTRY(mqfs_vdata) mv_link; | |||
121 | struct mqfs_node *mv_node; | |||
122 | struct vnode *mv_vnode; | |||
123 | struct task mv_task; | |||
124 | }; | |||
125 | ||||
126 | /* | |||
127 | * mqfs_node: describes a node (file or directory) within a mqfs | |||
128 | */ | |||
129 | struct mqfs_node { | |||
130 | char mn_name[MQFS_NAMELEN+1]; | |||
131 | struct mqfs_info *mn_info; | |||
132 | struct mqfs_node *mn_parent; | |||
133 | LIST_HEAD(,mqfs_node) mn_children; | |||
134 | LIST_ENTRY(mqfs_node) mn_sibling; | |||
135 | LIST_HEAD(,mqfs_vdata) mn_vnodes; | |||
136 | const void *mn_pr_root; | |||
137 | int mn_refcount; | |||
138 | mqfs_type_t mn_type; | |||
139 | int mn_deleted; | |||
140 | uint32_t mn_fileno; | |||
141 | void *mn_data; | |||
142 | struct timespec mn_birth; | |||
143 | struct timespec mn_ctime; | |||
144 | struct timespec mn_atime; | |||
145 | struct timespec mn_mtime; | |||
146 | uid_t mn_uid; | |||
147 | gid_t mn_gid; | |||
148 | int mn_mode; | |||
149 | }; | |||
150 | ||||
151 | #define VTON(vp) (((struct mqfs_vdata *)((vp)->v_data))->mv_node) | |||
152 | #define VTOMQ(vp) ((struct mqueue *)(VTON(vp)->mn_data)) | |||
153 | #define VFSTOMQFS(m) ((struct mqfs_info *)((m)->mnt_data)) | |||
154 | #define FPTOMQ(fp) ((struct mqueue *)(((struct mqfs_node *) \ | |||
155 | (fp)->f_data)->mn_data)) | |||
156 | ||||
157 | TAILQ_HEAD(msgq, mqueue_msg); | |||
158 | ||||
159 | struct mqueue; | |||
160 | ||||
161 | struct mqueue_notifier { | |||
162 | LIST_ENTRY(mqueue_notifier) nt_link; | |||
163 | struct sigevent nt_sigev; | |||
164 | ksiginfo_t nt_ksi; | |||
165 | struct proc *nt_proc; | |||
166 | }; | |||
167 | ||||
168 | struct mqueue { | |||
169 | struct mtx mq_mutex; | |||
170 | int mq_flags; | |||
171 | long mq_maxmsg; | |||
172 | long mq_msgsize; | |||
173 | long mq_curmsgs; | |||
174 | long mq_totalbytes; | |||
175 | struct msgq mq_msgq; | |||
176 | int mq_receivers; | |||
177 | int mq_senders; | |||
178 | struct selinfo mq_rsel; | |||
179 | struct selinfo mq_wsel; | |||
180 | struct mqueue_notifier *mq_notifier; | |||
181 | }; | |||
182 | ||||
183 | #define MQ_RSEL 0x01 | |||
184 | #define MQ_WSEL 0x02 | |||
185 | ||||
186 | struct mqueue_msg { | |||
187 | TAILQ_ENTRY(mqueue_msg) msg_link; | |||
188 | unsigned int msg_prio; | |||
189 | unsigned int msg_size; | |||
190 | /* following real data... */ | |||
191 | }; | |||
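/*
 * Sketch of how the variable-length payload noted above is reached: it is
 * stored immediately after the fixed header, so (assuming this layout; the
 * allocation and copy code lies outside this excerpt) a consumer would do
 * something like:
 *
 *	char *payload = (char *)(msg + 1);	-- msg: struct mqueue_msg *
 *	memcpy(buf, payload, msg->msg_size);
 */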
192 | ||||
193 | static SYSCTL_NODE(_kern, OID_AUTO, mqueue, CTLFLAG_RW, 0, | |||
194 | "POSIX real time message queue"); | |||
195 | ||||
196 | static int default_maxmsg = 10; | |||
197 | static int default_msgsize = 1024; | |||
198 | ||||
199 | static int maxmsg = 100; | |||
200 | SYSCTL_INT(_kern_mqueue, OID_AUTO, maxmsg, CTLFLAG_RW, | |||
201 | &maxmsg, 0, "Default maximum messages in queue"); | |||
202 | static int maxmsgsize = 16384; | |||
203 | SYSCTL_INT(_kern_mqueue, OID_AUTO, maxmsgsize, CTLFLAG_RW, | |||
204 | &maxmsgsize, 0, "Default maximum message size"); | |||
205 | static int maxmq = 100; | |||
206 | SYSCTL_INT(_kern_mqueue, OID_AUTO, maxmq, CTLFLAG_RW, | |||
207 | &maxmq, 0, "maximum message queues"); | |||
208 | static int curmq = 0; | |||
209 | SYSCTL_INT(_kern_mqueue, OID_AUTO, curmq, CTLFLAG_RW, | |||
210 | &curmq, 0, "current message queue number"); | |||
211 | static int unloadable = 0; | |||
212 | static MALLOC_DEFINE(M_MQUEUEDATA, "mqdata", "mqueue data"); | |||
213 | ||||
214 | static eventhandler_tag exit_tag; | |||
215 | ||||
216 | /* Only one instance per system */ | |||
217 | static struct mqfs_info mqfs_data; | |||
218 | static uma_zone_t mqnode_zone; | |||
219 | static uma_zone_t mqueue_zone; | |||
220 | static uma_zone_t mvdata_zone; | |||
221 | static uma_zone_t mqnoti_zone; | |||
222 | static struct vop_vector mqfs_vnodeops; | |||
223 | static struct fileops mqueueops; | |||
224 | static unsigned mqfs_osd_jail_slot; | |||
225 | ||||
226 | /* | |||
227 | * Directory structure construction and manipulation | |||
228 | */ | |||
229 | #ifdef notyet | |||
230 | static struct mqfs_node *mqfs_create_dir(struct mqfs_node *parent, | |||
231 | const char *name, int namelen, struct ucred *cred, int mode); | |||
232 | static struct mqfs_node *mqfs_create_link(struct mqfs_node *parent, | |||
233 | const char *name, int namelen, struct ucred *cred, int mode); | |||
234 | #endif | |||
235 | ||||
236 | static struct mqfs_node *mqfs_create_file(struct mqfs_node *parent, | |||
237 | const char *name, int namelen, struct ucred *cred, int mode); | |||
238 | static int mqfs_destroy(struct mqfs_node *mn); | |||
239 | static void mqfs_fileno_alloc(struct mqfs_info *mi, struct mqfs_node *mn); | |||
240 | static void mqfs_fileno_free(struct mqfs_info *mi, struct mqfs_node *mn); | |||
241 | static int mqfs_allocv(struct mount *mp, struct vnode **vpp, struct mqfs_node *pn); | |||
242 | static int mqfs_prison_remove(void *obj, void *data); | |||
243 | ||||
244 | /* | |||
245 | * Message queue construction and manipulation | |||
246 | */ | |||
247 | static struct mqueue *mqueue_alloc(const struct mq_attr *attr); | |||
248 | static void mqueue_free(struct mqueue *mq); | |||
249 | static int mqueue_send(struct mqueue *mq, const char *msg_ptr, | |||
250 | size_t msg_len, unsigned msg_prio, int waitok, | |||
251 | const struct timespec *abs_timeout); | |||
252 | static int mqueue_receive(struct mqueue *mq, char *msg_ptr, | |||
253 | size_t msg_len, unsigned *msg_prio, int waitok, | |||
254 | const struct timespec *abs_timeout); | |||
255 | static int _mqueue_send(struct mqueue *mq, struct mqueue_msg *msg, | |||
256 | int timo); | |||
257 | static int _mqueue_recv(struct mqueue *mq, struct mqueue_msg **msg, | |||
258 | int timo); | |||
259 | static void mqueue_send_notification(struct mqueue *mq); | |||
260 | static void mqueue_fdclose(struct thread *td, int fd, struct file *fp); | |||
261 | static void mq_proc_exit(void *arg, struct proc *p); | |||
262 | ||||
263 | /* | |||
264 | * kqueue filters | |||
265 | */ | |||
266 | static void filt_mqdetach(struct knote *kn); | |||
267 | static int filt_mqread(struct knote *kn, long hint); | |||
268 | static int filt_mqwrite(struct knote *kn, long hint); | |||
269 | ||||
270 | struct filterops mq_rfiltops = { | |||
271 | .f_isfd = 1, | |||
272 | .f_detach = filt_mqdetach, | |||
273 | .f_event = filt_mqread, | |||
274 | }; | |||
275 | struct filterops mq_wfiltops = { | |||
276 | .f_isfd = 1, | |||
277 | .f_detach = filt_mqdetach, | |||
278 | .f_event = filt_mqwrite, | |||
279 | }; | |||
280 | ||||
281 | /* | |||
282 | * Initialize fileno bitmap | |||
283 | */ | |||
284 | static void | |||
285 | mqfs_fileno_init(struct mqfs_info *mi) | |||
286 | { | |||
287 | struct unrhdr *up; | |||
288 | ||||
289 | up = new_unrhdr(1, INT_MAX0x7fffffff, NULL((void *)0)); | |||
290 | mi->mi_unrhdr = up; | |||
291 | } | |||
292 | ||||
293 | /* | |||
294 | * Tear down fileno bitmap | |||
295 | */ | |||
296 | static void | |||
297 | mqfs_fileno_uninit(struct mqfs_info *mi) | |||
298 | { | |||
299 | struct unrhdr *up; | |||
300 | ||||
301 | up = mi->mi_unrhdr; | |||
302 | mi->mi_unrhdr = NULL((void *)0); | |||
303 | delete_unrhdr(up); | |||
304 | } | |||
305 | ||||
306 | /* | |||
307 | * Allocate a file number | |||
308 | */ | |||
309 | static void | |||
310 | mqfs_fileno_alloc(struct mqfs_info *mi, struct mqfs_node *mn) | |||
311 | { | |||
312 | /* make sure our parent has a file number */ | |||
313 | if (mn->mn_parent && !mn->mn_parent->mn_fileno) | |||
314 | mqfs_fileno_alloc(mi, mn->mn_parent); | |||
315 | ||||
316 | switch (mn->mn_type) { | |||
317 | case mqfstype_root: | |||
318 | case mqfstype_dir: | |||
319 | case mqfstype_file: | |||
320 | case mqfstype_symlink: | |||
321 | mn->mn_fileno = alloc_unr(mi->mi_unrhdr); | |||
322 | break; | |||
323 | case mqfstype_this: | |||
324 | KASSERT(mn->mn_parent != NULL,do { } while (0) | |||
325 | ("mqfstype_this node has no parent"))do { } while (0); | |||
326 | mn->mn_fileno = mn->mn_parent->mn_fileno; | |||
327 | break; | |||
328 | case mqfstype_parent: | |||
329 | KASSERT(mn->mn_parent != NULL,do { } while (0) | |||
330 | ("mqfstype_parent node has no parent"))do { } while (0); | |||
331 | if (mn->mn_parent == mi->mi_root) { | |||
332 | mn->mn_fileno = mn->mn_parent->mn_fileno; | |||
333 | break; | |||
334 | } | |||
335 | KASSERT(mn->mn_parent->mn_parent != NULL,do { } while (0) | |||
336 | ("mqfstype_parent node has no grandparent"))do { } while (0); | |||
337 | mn->mn_fileno = mn->mn_parent->mn_parent->mn_fileno; | |||
338 | break; | |||
339 | default: | |||
340 | KASSERT(0,do { } while (0) | |||
341 | ("mqfs_fileno_alloc() called for unknown type node: %d",do { } while (0) | |||
342 | mn->mn_type))do { } while (0); | |||
343 | break; | |||
344 | } | |||
345 | } | |||
346 | ||||
347 | /* | |||
348 | * Release a file number | |||
349 | */ | |||
350 | static void | |||
351 | mqfs_fileno_free(struct mqfs_info *mi, struct mqfs_node *mn) | |||
352 | { | |||
353 | switch (mn->mn_type) { | |||
354 | case mqfstype_root: | |||
355 | case mqfstype_dir: | |||
356 | case mqfstype_file: | |||
357 | case mqfstype_symlink: | |||
358 | free_unr(mi->mi_unrhdr, mn->mn_fileno); | |||
359 | break; | |||
360 | case mqfstype_this: | |||
361 | case mqfstype_parent: | |||
362 | /* ignore these, as they don't "own" their file number */ | |||
363 | break; | |||
364 | default: | |||
365 | KASSERT(0,do { } while (0) | |||
366 | ("mqfs_fileno_free() called for unknown type node: %d",do { } while (0) | |||
367 | mn->mn_type))do { } while (0); | |||
368 | break; | |||
369 | } | |||
370 | } | |||
371 | ||||
372 | static __inline struct mqfs_node * | |||
373 | mqnode_alloc(void) | |||
374 | { | |||
375 | return uma_zalloc(mqnode_zone, M_WAITOK0x0002 | M_ZERO0x0100); | |||
376 | } | |||
377 | ||||
378 | static __inline void | |||
379 | mqnode_free(struct mqfs_node *node) | |||
380 | { | |||
381 | uma_zfree(mqnode_zone, node); | |||
382 | } | |||
383 | ||||
384 | static __inline void | |||
385 | mqnode_addref(struct mqfs_node *node) | |||
386 | { | |||
387 | atomic_fetchadd_int(&node->mn_refcount, 1); | |||
388 | } | |||
389 | ||||
390 | static __inline void | |||
391 | mqnode_release(struct mqfs_node *node) | |||
392 | { | |||
393 | struct mqfs_info *mqfs; | |||
394 | int old, exp; | |||
395 | ||||
396 | mqfs = node->mn_info; | |||
397 | old = atomic_fetchadd_int(&node->mn_refcount, -1); | |||
398 | if (node->mn_type == mqfstype_dir || | |||
399 | node->mn_type == mqfstype_root) | |||
400 | exp = 3; /* include . and .. */ | |||
401 | else | |||
402 | exp = 1; | |||
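/*
 * exp is the node's structural reference baseline: the reference taken at
 * creation plus, for directories, one held by each of the "." and ".."
 * entries (mqfs_add_node() references the parent).  A release that drops
 * the count from this baseline means only those structural references
 * remain, so the node (together with its "."/".." children) is torn down
 * below.
 */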
403 | if (old == exp) { | |||
404 | int locked = sx_xlocked(&mqfs->mi_lock)(((&mqfs->mi_lock)->sx_lock & ~((0x01 | 0x02 | 0x04 | 0x08) & ~0x01)) == (uintptr_t)(__curthread())); | |||
405 | if (!locked) | |||
406 | sx_xlock(&mqfs->mi_lock)(void)_sx_xlock(((&mqfs->mi_lock)), 0, ("/usr/src/sys/modules/mqueue/../../kern/uipc_mqueue.c" ), (406)); | |||
407 | mqfs_destroy(node); | |||
408 | if (!locked) | |||
409 | sx_xunlock(&mqfs->mi_lock)_sx_xunlock(((&mqfs->mi_lock)), ("/usr/src/sys/modules/mqueue/../../kern/uipc_mqueue.c" ), (409)); | |||
410 | } | |||
411 | } | |||
412 | ||||
413 | /* | |||
414 | * Add a node to a directory | |||
415 | */ | |||
416 | static int | |||
417 | mqfs_add_node(struct mqfs_node *parent, struct mqfs_node *node) | |||
418 | { | |||
419 | KASSERT(parent != NULL, ("%s(): parent is NULL", __func__))do { } while (0); | |||
420 | KASSERT(parent->mn_info != NULL,do { } while (0) | |||
421 | ("%s(): parent has no mn_info", __func__))do { } while (0); | |||
422 | KASSERT(parent->mn_type == mqfstype_dir ||do { } while (0) | |||
423 | parent->mn_type == mqfstype_root,do { } while (0) | |||
424 | ("%s(): parent is not a directory", __func__))do { } while (0); | |||
425 | ||||
426 | node->mn_info = parent->mn_info; | |||
427 | node->mn_parent = parent; | |||
428 | LIST_INIT(&node->mn_children)do { (((&node->mn_children))->lh_first) = ((void *) 0); } while (0); | |||
429 | LIST_INIT(&node->mn_vnodes)do { (((&node->mn_vnodes))->lh_first) = ((void *)0) ; } while (0); | |||
430 | LIST_INSERT_HEAD(&parent->mn_children, node, mn_sibling)do { ; if (((((node))->mn_sibling.le_next) = (((&parent ->mn_children))->lh_first)) != ((void *)0)) (((&parent ->mn_children))->lh_first)->mn_sibling.le_prev = & (((node))->mn_sibling.le_next); (((&parent->mn_children ))->lh_first) = (node); (node)->mn_sibling.le_prev = & (((&parent->mn_children))->lh_first); } while (0); | |||
431 | mqnode_addref(parent); | |||
432 | return (0); | |||
433 | } | |||
434 | ||||
435 | static struct mqfs_node * | |||
436 | mqfs_create_node(const char *name, int namelen, struct ucred *cred, int mode, | |||
437 | int nodetype) | |||
438 | { | |||
439 | struct mqfs_node *node; | |||
440 | ||||
441 | node = mqnode_alloc(); | |||
442 | strncpy(node->mn_name, name, namelen); | |||
443 | node->mn_pr_root = cred->cr_prison->pr_root; | |||
444 | node->mn_type = nodetype; | |||
445 | node->mn_refcount = 1; | |||
446 | vfs_timestamp(&node->mn_birth); | |||
447 | node->mn_ctime = node->mn_atime = node->mn_mtime | |||
448 | = node->mn_birth; | |||
449 | node->mn_uid = cred->cr_uid; | |||
450 | node->mn_gid = cred->cr_gidcr_groups[0]; | |||
451 | node->mn_mode = mode; | |||
452 | return (node); | |||
453 | } | |||
454 | ||||
455 | /* | |||
456 | * Create a file | |||
457 | */ | |||
458 | static struct mqfs_node * | |||
459 | mqfs_create_file(struct mqfs_node *parent, const char *name, int namelen, | |||
460 | struct ucred *cred, int mode) | |||
461 | { | |||
462 | struct mqfs_node *node; | |||
463 | ||||
464 | node = mqfs_create_node(name, namelen, cred, mode, mqfstype_file); | |||
465 | if (mqfs_add_node(parent, node) != 0) { | |||
466 | mqnode_free(node); | |||
467 | return (NULL((void *)0)); | |||
468 | } | |||
469 | return (node); | |||
470 | } | |||
471 | ||||
472 | /* | |||
473 | * Add . and .. to a directory | |||
474 | */ | |||
475 | static int | |||
476 | mqfs_fixup_dir(struct mqfs_node *parent) | |||
477 | { | |||
478 | struct mqfs_node *dir; | |||
479 | ||||
480 | dir = mqnode_alloc(); | |||
481 | dir->mn_name[0] = '.'; | |||
482 | dir->mn_type = mqfstype_this; | |||
483 | dir->mn_refcount = 1; | |||
484 | if (mqfs_add_node(parent, dir) != 0) { | |||
485 | mqnode_free(dir); | |||
486 | return (-1); | |||
487 | } | |||
488 | ||||
489 | dir = mqnode_alloc(); | |||
490 | dir->mn_name[0] = dir->mn_name[1] = '.'; | |||
491 | dir->mn_type = mqfstype_parent; | |||
492 | dir->mn_refcount = 1; | |||
493 | ||||
494 | if (mqfs_add_node(parent, dir) != 0) { | |||
495 | mqnode_free(dir); | |||
496 | return (-1); | |||
497 | } | |||
498 | ||||
499 | return (0); | |||
500 | } | |||
501 | ||||
502 | #ifdef notyet | |||
503 | ||||
504 | /* | |||
505 | * Create a directory | |||
506 | */ | |||
507 | static struct mqfs_node * | |||
508 | mqfs_create_dir(struct mqfs_node *parent, const char *name, int namelen, | |||
509 | struct ucred *cred, int mode) | |||
510 | { | |||
511 | struct mqfs_node *node; | |||
512 | ||||
513 | node = mqfs_create_node(name, namelen, cred, mode, mqfstype_dir); | |||
514 | if (mqfs_add_node(parent, node) != 0) { | |||
515 | mqnode_free(node); | |||
516 | return (NULL((void *)0)); | |||
517 | } | |||
518 | ||||
519 | if (mqfs_fixup_dir(node) != 0) { | |||
520 | mqfs_destroy(node); | |||
521 | return (NULL((void *)0)); | |||
522 | } | |||
523 | return (node); | |||
524 | } | |||
525 | ||||
526 | /* | |||
527 | * Create a symlink | |||
528 | */ | |||
529 | static struct mqfs_node * | |||
530 | mqfs_create_link(struct mqfs_node *parent, const char *name, int namelen, | |||
531 | struct ucred *cred, int mode) | |||
532 | { | |||
533 | struct mqfs_node *node; | |||
534 | ||||
535 | node = mqfs_create_node(name, namelen, cred, mode, mqfstype_symlink); | |||
536 | if (mqfs_add_node(parent, node) != 0) { | |||
537 | mqnode_free(node); | |||
538 | return (NULL((void *)0)); | |||
539 | } | |||
540 | return (node); | |||
541 | } | |||
542 | ||||
543 | #endif | |||
544 | ||||
545 | /* | |||
546 | * Destroy a node or a tree of nodes | |||
547 | */ | |||
548 | static int | |||
549 | mqfs_destroy(struct mqfs_node *node) | |||
550 | { | |||
551 | struct mqfs_node *parent; | |||
552 | ||||
553 | KASSERT(node != NULL,do { } while (0) | |||
554 | ("%s(): node is NULL", __func__))do { } while (0); | |||
555 | KASSERT(node->mn_info != NULL,do { } while (0) | |||
556 | ("%s(): node has no mn_info", __func__))do { } while (0); | |||
557 | ||||
558 | /* destroy children */ | |||
559 | if (node->mn_type == mqfstype_dir || node->mn_type == mqfstype_root) | |||
560 | while (! LIST_EMPTY(&node->mn_children)((&node->mn_children)->lh_first == ((void *)0))) | |||
561 | mqfs_destroy(LIST_FIRST(&node->mn_children)((&node->mn_children)->lh_first)); | |||
562 | ||||
563 | /* unlink from parent */ | |||
564 | if ((parent = node->mn_parent) != NULL((void *)0)) { | |||
565 | KASSERT(parent->mn_info == node->mn_info,do { } while (0) | |||
566 | ("%s(): parent has different mn_info", __func__))do { } while (0); | |||
567 | LIST_REMOVE(node, mn_sibling)do { ; ; ; ; if ((((node))->mn_sibling.le_next) != ((void * )0)) (((node))->mn_sibling.le_next)->mn_sibling.le_prev = (node)->mn_sibling.le_prev; *(node)->mn_sibling.le_prev = (((node))->mn_sibling.le_next); ; ; } while (0); | |||
568 | } | |||
569 | ||||
570 | if (node->mn_fileno != 0) | |||
571 | mqfs_fileno_free(node->mn_info, node); | |||
572 | if (node->mn_data != NULL((void *)0)) | |||
573 | mqueue_free(node->mn_data); | |||
574 | mqnode_free(node); | |||
575 | return (0); | |||
576 | } | |||
577 | ||||
578 | /* | |||
579 | * Mount a mqfs instance | |||
580 | */ | |||
581 | static int | |||
582 | mqfs_mount(struct mount *mp) | |||
583 | { | |||
584 | struct statfs *sbp; | |||
585 | ||||
586 | if (mp->mnt_flag & MNT_UPDATE0x0000000000010000ULL) | |||
587 | return (EOPNOTSUPP45); | |||
588 | ||||
589 | mp->mnt_data = &mqfs_data; | |||
590 | MNT_ILOCK(mp)__mtx_lock_flags(&((((&(mp)->mnt_mtx))))->mtx_lock , ((0)), ("/usr/src/sys/modules/mqueue/../../kern/uipc_mqueue.c" ), (590)); | |||
591 | mp->mnt_flag |= MNT_LOCAL0x0000000000001000ULL; | |||
592 | MNT_IUNLOCK(mp)__mtx_unlock_flags(&((((&(mp)->mnt_mtx))))->mtx_lock , ((0)), ("/usr/src/sys/modules/mqueue/../../kern/uipc_mqueue.c" ), (592)); | |||
593 | vfs_getnewfsid(mp); | |||
594 | ||||
595 | sbp = &mp->mnt_stat; | |||
596 | vfs_mountedfrom(mp, "mqueue"); | |||
597 | sbp->f_bsize = PAGE_SIZE(1<<12); | |||
598 | sbp->f_iosize = PAGE_SIZE(1<<12); | |||
599 | sbp->f_blocks = 1; | |||
600 | sbp->f_bfree = 0; | |||
601 | sbp->f_bavail = 0; | |||
602 | sbp->f_files = 1; | |||
603 | sbp->f_ffree = 0; | |||
604 | return (0); | |||
605 | } | |||
606 | ||||
607 | /* | |||
608 | * Unmount a mqfs instance | |||
609 | */ | |||
610 | static int | |||
611 | mqfs_unmount(struct mount *mp, int mntflags) | |||
612 | { | |||
613 | int error; | |||
614 | ||||
615 | error = vflush(mp, 0, (mntflags & MNT_FORCE0x0000000000080000ULL) ? FORCECLOSE0x0002 : 0, | |||
616 | curthread(__curthread())); | |||
617 | return (error); | |||
618 | } | |||
619 | ||||
620 | /* | |||
621 | * Return a root vnode | |||
622 | */ | |||
623 | static int | |||
624 | mqfs_root(struct mount *mp, int flags, struct vnode **vpp) | |||
625 | { | |||
626 | struct mqfs_info *mqfs; | |||
627 | int ret; | |||
628 | ||||
629 | mqfs = VFSTOMQFS(mp)((struct mqfs_info *)((mp)->mnt_data)); | |||
630 | ret = mqfs_allocv(mp, vpp, mqfs->mi_root); | |||
631 | return (ret); | |||
632 | } | |||
633 | ||||
634 | /* | |||
635 | * Return filesystem stats | |||
636 | */ | |||
637 | static int | |||
638 | mqfs_statfs(struct mount *mp, struct statfs *sbp) | |||
639 | { | |||
640 | /* XXX update statistics */ | |||
641 | return (0); | |||
642 | } | |||
643 | ||||
644 | /* | |||
645 | * Initialize a mqfs instance | |||
646 | */ | |||
647 | static int | |||
648 | mqfs_init(struct vfsconf *vfc) | |||
649 | { | |||
650 | struct mqfs_node *root; | |||
651 | struct mqfs_info *mi; | |||
652 | osd_method_t methods[PR_MAXMETHOD6] = { | |||
653 | [PR_METHOD_REMOVE5] = mqfs_prison_remove, | |||
654 | }; | |||
655 | ||||
656 | mqnode_zone = uma_zcreate("mqnode", sizeof(struct mqfs_node), | |||
657 | NULL((void *)0), NULL((void *)0), NULL((void *)0), NULL((void *)0), UMA_ALIGN_PTR(sizeof(void *) - 1), 0); | |||
658 | mqueue_zone = uma_zcreate("mqueue", sizeof(struct mqueue), | |||
659 | NULL((void *)0), NULL((void *)0), NULL((void *)0), NULL((void *)0), UMA_ALIGN_PTR(sizeof(void *) - 1), 0); | |||
660 | mvdata_zone = uma_zcreate("mvdata", | |||
661 | sizeof(struct mqfs_vdata), NULL((void *)0), NULL((void *)0), NULL((void *)0), | |||
662 | NULL((void *)0), UMA_ALIGN_PTR(sizeof(void *) - 1), 0); | |||
663 | mqnoti_zone = uma_zcreate("mqnotifier", sizeof(struct mqueue_notifier), | |||
664 | NULL((void *)0), NULL((void *)0), NULL((void *)0), NULL((void *)0), UMA_ALIGN_PTR(sizeof(void *) - 1), 0); | |||
665 | mi = &mqfs_data; | |||
666 | sx_init(&mi->mi_lock, "mqfs lock")sx_init_flags((&mi->mi_lock), ("mqfs lock"), 0); | |||
667 | /* set up the root directory */ | |||
668 | root = mqfs_create_node("/", 1, curthread(__curthread())->td_ucred, 01777, | |||
669 | mqfstype_root); | |||
670 | root->mn_info = mi; | |||
671 | LIST_INIT(&root->mn_children)do { (((&root->mn_children))->lh_first) = ((void *) 0); } while (0); | |||
672 | LIST_INIT(&root->mn_vnodes)do { (((&root->mn_vnodes))->lh_first) = ((void *)0) ; } while (0); | |||
673 | mi->mi_root = root; | |||
674 | mqfs_fileno_init(mi); | |||
675 | mqfs_fileno_alloc(mi, root); | |||
676 | mqfs_fixup_dir(root); | |||
677 | exit_tag = EVENTHANDLER_REGISTER(process_exit, mq_proc_exit, NULL,eventhandler_register(((void *)0), "process_exit", mq_proc_exit , ((void *)0), 10000) | |||
678 | EVENTHANDLER_PRI_ANY)eventhandler_register(((void *)0), "process_exit", mq_proc_exit , ((void *)0), 10000); | |||
679 | mq_fdclose = mqueue_fdclose; | |||
680 | p31b_setcfg(CTL_P1003_1B_MESSAGE_PASSING6, _POSIX_MESSAGE_PASSING200112L); | |||
681 | mqfs_osd_jail_slot = osd_jail_register(NULL, methods)osd_register(1, (((void *)0)), (methods)); | |||
682 | return (0); | |||
683 | } | |||
684 | ||||
685 | /* | |||
686 | * Destroy a mqfs instance | |||
687 | */ | |||
688 | static int | |||
689 | mqfs_uninit(struct vfsconf *vfc) | |||
690 | { | |||
691 | struct mqfs_info *mi; | |||
692 | ||||
693 | if (!unloadable) | |||
694 | return (EOPNOTSUPP45); | |||
695 | osd_jail_deregister(mqfs_osd_jail_slot)osd_deregister(1, (mqfs_osd_jail_slot)); | |||
696 | EVENTHANDLER_DEREGISTER(process_exit, exit_tag)do { struct eventhandler_list *_el; if ((_el = eventhandler_find_list ("process_exit")) != ((void *)0)) eventhandler_deregister(_el , exit_tag); } while(0); | |||
697 | mi = &mqfs_data; | |||
698 | mqfs_destroy(mi->mi_root); | |||
699 | mi->mi_root = NULL((void *)0); | |||
700 | mqfs_fileno_uninit(mi); | |||
701 | sx_destroy(&mi->mi_lock); | |||
702 | uma_zdestroy(mqnode_zone); | |||
703 | uma_zdestroy(mqueue_zone); | |||
704 | uma_zdestroy(mvdata_zone); | |||
705 | uma_zdestroy(mqnoti_zone); | |||
706 | return (0); | |||
707 | } | |||
708 | ||||
709 | /* | |||
710 | * Deferred vnode-recycling task routine | |||
711 | */ | |||
712 | static void | |||
713 | do_recycle(void *context, int pending __unused__attribute__((__unused__))) | |||
714 | { | |||
715 | struct vnode *vp = (struct vnode *)context; | |||
716 | ||||
717 | vrecycle(vp); | |||
718 | vdrop(vp)_vdrop((vp), 0); | |||
719 | } | |||
720 | ||||
721 | /* | |||
722 | * Allocate a vnode | |||
723 | */ | |||
724 | static int | |||
725 | mqfs_allocv(struct mount *mp, struct vnode **vpp, struct mqfs_node *pn) | |||
726 | { | |||
727 | struct mqfs_vdata *vd; | |||
728 | struct mqfs_info *mqfs; | |||
729 | struct vnode *newvpp; | |||
730 | int error; | |||
731 | ||||
732 | mqfs = pn->mn_info; | |||
733 | *vpp = NULL((void *)0); | |||
734 | sx_xlock(&mqfs->mi_lock)(void)_sx_xlock(((&mqfs->mi_lock)), 0, ("/usr/src/sys/modules/mqueue/../../kern/uipc_mqueue.c" ), (734)); | |||
735 | LIST_FOREACH(vd, &pn->mn_vnodes, mv_link)for ((vd) = (((&pn->mn_vnodes))->lh_first); (vd); ( vd) = (((vd))->mv_link.le_next)) { | |||
736 | if (vd->mv_vnode->v_mount == mp) { | |||
737 | vhold(vd->mv_vnode)_vhold((vd->mv_vnode), 0); | |||
738 | break; | |||
739 | } | |||
740 | } | |||
741 | ||||
742 | if (vd != NULL((void *)0)) { | |||
743 | found: | |||
744 | *vpp = vd->mv_vnode; | |||
745 | sx_xunlock(&mqfs->mi_lock)_sx_xunlock(((&mqfs->mi_lock)), ("/usr/src/sys/modules/mqueue/../../kern/uipc_mqueue.c" ), (745)); | |||
746 | error = vget(*vpp, LK_RETRY0x000400 | LK_EXCLUSIVE0x080000, curthread(__curthread())); | |||
747 | vdrop(*vpp)_vdrop((*vpp), 0); | |||
748 | return (error); | |||
749 | } | |||
750 | sx_xunlock(&mqfs->mi_lock)_sx_xunlock(((&mqfs->mi_lock)), ("/usr/src/sys/modules/mqueue/../../kern/uipc_mqueue.c" ), (750)); | |||
751 | ||||
752 | error = getnewvnode("mqueue", mp, &mqfs_vnodeops, &newvpp); | |||
753 | if (error) | |||
754 | return (error); | |||
755 | vn_lock(newvpp, LK_EXCLUSIVE | LK_RETRY)_vn_lock(newvpp, 0x080000 | 0x000400, "/usr/src/sys/modules/mqueue/../../kern/uipc_mqueue.c" , 755); | |||
756 | error = insmntque(newvpp, mp); | |||
757 | if (error != 0) | |||
758 | return (error); | |||
759 | ||||
760 | sx_xlock(&mqfs->mi_lock)(void)_sx_xlock(((&mqfs->mi_lock)), 0, ("/usr/src/sys/modules/mqueue/../../kern/uipc_mqueue.c" ), (760)); | |||
761 | /* | |||
762 | * Check if it has already been allocated | |||
763 | * while we were blocked. | |||
764 | */ | |||
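/*
 * (The mi_lock was dropped across getnewvnode() and insmntque(), both of
 * which can sleep, so another thread may have attached a vnode to this
 * node for the same mount in the meantime.  If one is found, the freshly
 * allocated newvpp is discarded via vgone()/vput() and the existing vnode
 * is returned through the "found" label above.)
 */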
765 | LIST_FOREACH(vd, &pn->mn_vnodes, mv_link)for ((vd) = (((&pn->mn_vnodes))->lh_first); (vd); ( vd) = (((vd))->mv_link.le_next)) { | |||
766 | if (vd->mv_vnode->v_mount == mp) { | |||
767 | vhold(vd->mv_vnode)_vhold((vd->mv_vnode), 0); | |||
768 | sx_xunlock(&mqfs->mi_lock)_sx_xunlock(((&mqfs->mi_lock)), ("/usr/src/sys/modules/mqueue/../../kern/uipc_mqueue.c" ), (768)); | |||
769 | ||||
770 | vgone(newvpp); | |||
771 | vput(newvpp); | |||
772 | goto found; | |||
773 | } | |||
774 | } | |||
775 | ||||
776 | *vpp = newvpp; | |||
777 | ||||
778 | vd = uma_zalloc(mvdata_zone, M_WAITOK0x0002); | |||
779 | (*vpp)->v_data = vd; | |||
780 | vd->mv_vnode = *vpp; | |||
781 | vd->mv_node = pn; | |||
782 | TASK_INIT(&vd->mv_task, 0, do_recycle, *vpp)do { (&vd->mv_task)->ta_pending = 0; (&vd->mv_task )->ta_priority = (0); (&vd->mv_task)->ta_func = ( do_recycle); (&vd->mv_task)->ta_context = (*vpp); } while (0); | |||
783 | LIST_INSERT_HEAD(&pn->mn_vnodes, vd, mv_link)do { ; if (((((vd))->mv_link.le_next) = (((&pn->mn_vnodes ))->lh_first)) != ((void *)0)) (((&pn->mn_vnodes))-> lh_first)->mv_link.le_prev = &(((vd))->mv_link.le_next ); (((&pn->mn_vnodes))->lh_first) = (vd); (vd)-> mv_link.le_prev = &(((&pn->mn_vnodes))->lh_first ); } while (0); | |||
784 | mqnode_addref(pn); | |||
785 | switch (pn->mn_type) { | |||
786 | case mqfstype_root: | |||
787 | (*vpp)->v_vflag = VV_ROOT0x0001; | |||
788 | /* fall through */ | |||
789 | case mqfstype_dir: | |||
790 | case mqfstype_this: | |||
791 | case mqfstype_parent: | |||
792 | (*vpp)->v_type = VDIR; | |||
793 | break; | |||
794 | case mqfstype_file: | |||
795 | (*vpp)->v_type = VREG; | |||
796 | break; | |||
797 | case mqfstype_symlink: | |||
798 | (*vpp)->v_type = VLNK; | |||
799 | break; | |||
800 | case mqfstype_none: | |||
801 | KASSERT(0, ("mqfs_allocf called for null node\n"))do { } while (0); | |||
802 | default: | |||
803 | panic("%s has unexpected type: %d", pn->mn_name, pn->mn_type); | |||
804 | } | |||
805 | sx_xunlock(&mqfs->mi_lock)_sx_xunlock(((&mqfs->mi_lock)), ("/usr/src/sys/modules/mqueue/../../kern/uipc_mqueue.c" ), (805)); | |||
806 | return (0); | |||
807 | } | |||
808 | ||||
809 | /* | |||
810 | * Search a directory entry | |||
811 | */ | |||
812 | static struct mqfs_node * | |||
813 | mqfs_search(struct mqfs_node *pd, const char *name, int len, struct ucred *cred) | |||
814 | { | |||
815 | struct mqfs_node *pn; | |||
816 | const void *pr_root; | |||
817 | ||||
818 | sx_assert(&pd->mn_info->mi_lock, SX_LOCKED)(void)0; | |||
819 | pr_root = cred->cr_prison->pr_root; | |||
820 | LIST_FOREACH(pn, &pd->mn_children, mn_sibling)for ((pn) = (((&pd->mn_children))->lh_first); (pn); (pn) = (((pn))->mn_sibling.le_next)) { | |||
821 | /* Only match names within the same prison root directory */ | |||
822 | if ((pn->mn_pr_root == NULL((void *)0) || pn->mn_pr_root == pr_root) && | |||
823 | strncmp(pn->mn_name, name, len) == 0 && | |||
824 | pn->mn_name[len] == '\0') | |||
825 | return (pn); | |||
826 | } | |||
827 | return (NULL((void *)0)); | |||
828 | } | |||
829 | ||||
830 | /* | |||
831 | * Look up a file or directory. | |||
832 | */ | |||
833 | static int | |||
834 | mqfs_lookupx(struct vop_cachedlookup_args *ap) | |||
835 | { | |||
836 | struct componentname *cnp; | |||
837 | struct vnode *dvp, **vpp; | |||
838 | struct mqfs_node *pd; | |||
839 | struct mqfs_node *pn; | |||
840 | struct mqfs_info *mqfs; | |||
841 | int nameiop, flags, error, namelen; | |||
842 | char *pname; | |||
843 | struct thread *td; | |||
844 | ||||
845 | cnp = ap->a_cnp; | |||
846 | vpp = ap->a_vpp; | |||
847 | dvp = ap->a_dvp; | |||
848 | pname = cnp->cn_nameptr; | |||
849 | namelen = cnp->cn_namelen; | |||
850 | td = cnp->cn_thread; | |||
851 | flags = cnp->cn_flags; | |||
852 | nameiop = cnp->cn_nameiop; | |||
853 | pd = VTON(dvp)(((struct mqfs_vdata *)((dvp)->v_data))->mv_node); | |||
854 | pn = NULL((void *)0); | |||
855 | mqfs = pd->mn_info; | |||
856 | *vpp = NULLVP((struct vnode *)((void *)0)); | |||
857 | ||||
858 | if (dvp->v_type != VDIR) | |||
859 | return (ENOTDIR20); | |||
860 | ||||
861 | error = VOP_ACCESS(dvp, VEXEC000000000100, cnp->cn_cred, cnp->cn_thread); | |||
862 | if (error) | |||
863 | return (error); | |||
864 | ||||
865 | /* shortcut: check if the name is too long */ | |||
866 | if (cnp->cn_namelen >= MQFS_NAMELEN255) | |||
867 | return (ENOENT2); | |||
868 | ||||
869 | /* self */ | |||
870 | if (namelen == 1 && pname[0] == '.') { | |||
871 | if ((flags & ISLASTCN0x00008000) && nameiop != LOOKUP0) | |||
872 | return (EINVAL22); | |||
873 | pn = pd; | |||
874 | *vpp = dvp; | |||
875 | VREF(dvp)vref(dvp); | |||
876 | return (0); | |||
877 | } | |||
878 | ||||
879 | /* parent */ | |||
880 | if (cnp->cn_flags & ISDOTDOT0x00002000) { | |||
881 | if (dvp->v_vflag & VV_ROOT0x0001) | |||
882 | return (EIO5); | |||
883 | if ((flags & ISLASTCN0x00008000) && nameiop != LOOKUP0) | |||
884 | return (EINVAL22); | |||
885 | VOP_UNLOCK(dvp, 0); | |||
886 | KASSERT(pd->mn_parent, ("non-root directory has no parent"))do { } while (0); | |||
887 | pn = pd->mn_parent; | |||
888 | error = mqfs_allocv(dvp->v_mount, vpp, pn); | |||
889 | vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY)_vn_lock(dvp, 0x080000 | 0x000400, "/usr/src/sys/modules/mqueue/../../kern/uipc_mqueue.c" , 889); | |||
890 | return (error); | |||
891 | } | |||
892 | ||||
893 | /* named node */ | |||
894 | sx_xlock(&mqfs->mi_lock)(void)_sx_xlock(((&mqfs->mi_lock)), 0, ("/usr/src/sys/modules/mqueue/../../kern/uipc_mqueue.c" ), (894)); | |||
895 | pn = mqfs_search(pd, pname, namelen, cnp->cn_cred); | |||
896 | if (pn != NULL((void *)0)) | |||
897 | mqnode_addref(pn); | |||
898 | sx_xunlock(&mqfs->mi_lock)_sx_xunlock(((&mqfs->mi_lock)), ("/usr/src/sys/modules/mqueue/../../kern/uipc_mqueue.c" ), (898)); | |||
899 | ||||
900 | /* found */ | |||
901 | if (pn != NULL((void *)0)) { | |||
902 | /* DELETE */ | |||
903 | if (nameiop == DELETE2 && (flags & ISLASTCN0x00008000)) { | |||
904 | error = VOP_ACCESS(dvp, VWRITE000000000200, cnp->cn_cred, td); | |||
905 | if (error) { | |||
906 | mqnode_release(pn); | |||
907 | return (error); | |||
908 | } | |||
909 | if (*vpp == dvp) { | |||
910 | VREF(dvp)vref(dvp); | |||
911 | *vpp = dvp; | |||
912 | mqnode_release(pn); | |||
913 | return (0); | |||
914 | } | |||
915 | } | |||
916 | ||||
917 | /* allocate vnode */ | |||
918 | error = mqfs_allocv(dvp->v_mount, vpp, pn); | |||
919 | mqnode_release(pn); | |||
920 | if (error == 0 && cnp->cn_flags & MAKEENTRY0x00004000) | |||
921 | cache_enter(dvp, *vpp, cnp)cache_enter_time(dvp, *vpp, cnp, ((void *)0), ((void *)0)); | |||
922 | return (error); | |||
923 | } | |||
924 | ||||
925 | /* not found */ | |||
926 | ||||
927 | /* will a new entry be created in the directory? */ | |||
928 | if ((nameiop == CREATE1 || nameiop == RENAME3) && (flags & LOCKPARENT0x0008) | |||
929 | && (flags & ISLASTCN0x00008000)) { | |||
930 | error = VOP_ACCESS(dvp, VWRITE000000000200, cnp->cn_cred, td); | |||
931 | if (error) | |||
932 | return (error); | |||
933 | cnp->cn_flags |= SAVENAME0x00000800; | |||
934 | return (EJUSTRETURN(-2)); | |||
935 | } | |||
936 | return (ENOENT2); | |||
937 | } | |||
938 | ||||
939 | #if 0 | |||
940 | struct vop_lookup_args { | |||
941 | struct vop_generic_args a_gen; | |||
942 | struct vnode *a_dvp; | |||
943 | struct vnode **a_vpp; | |||
944 | struct componentname *a_cnp; | |||
945 | }; | |||
946 | #endif | |||
947 | ||||
948 | /* | |||
949 | * vnode lookup operation | |||
950 | */ | |||
951 | static int | |||
952 | mqfs_lookup(struct vop_cachedlookup_args *ap) | |||
953 | { | |||
954 | int rc; | |||
955 | ||||
956 | rc = mqfs_lookupx(ap); | |||
957 | return (rc); | |||
958 | } | |||
959 | ||||
960 | #if 0 | |||
961 | struct vop_create_args { | |||
962 | struct vnode *a_dvp; | |||
963 | struct vnode **a_vpp; | |||
964 | struct componentname *a_cnp; | |||
965 | struct vattr *a_vap; | |||
966 | }; | |||
967 | #endif | |||
968 | ||||
969 | /* | |||
970 | * vnode creation operation | |||
971 | */ | |||
972 | static int | |||
973 | mqfs_create(struct vop_create_args *ap) | |||
974 | { | |||
975 | struct mqfs_info *mqfs = VFSTOMQFS(ap->a_dvp->v_mount)((struct mqfs_info *)((ap->a_dvp->v_mount)->mnt_data )); | |||
976 | struct componentname *cnp = ap->a_cnp; | |||
977 | struct mqfs_node *pd; | |||
978 | struct mqfs_node *pn; | |||
979 | struct mqueue *mq; | |||
980 | int error; | |||
981 | ||||
982 | pd = VTON(ap->a_dvp)(((struct mqfs_vdata *)((ap->a_dvp)->v_data))->mv_node ); | |||
983 | if (pd->mn_type != mqfstype_root && pd->mn_type != mqfstype_dir) | |||
984 | return (ENOTDIR20); | |||
985 | mq = mqueue_alloc(NULL((void *)0)); | |||
986 | if (mq == NULL((void *)0)) | |||
987 | return (EAGAIN35); | |||
988 | sx_xlock(&mqfs->mi_lock)(void)_sx_xlock(((&mqfs->mi_lock)), 0, ("/usr/src/sys/modules/mqueue/../../kern/uipc_mqueue.c" ), (988)); | |||
989 | if ((cnp->cn_flags & HASBUF0x00000400) == 0) | |||
990 | panic("%s: no name", __func__); | |||
991 | pn = mqfs_create_file(pd, cnp->cn_nameptr, cnp->cn_namelen, | |||
992 | cnp->cn_cred, ap->a_vap->va_mode); | |||
993 | if (pn == NULL((void *)0)) { | |||
994 | sx_xunlock(&mqfs->mi_lock)_sx_xunlock(((&mqfs->mi_lock)), ("/usr/src/sys/modules/mqueue/../../kern/uipc_mqueue.c" ), (994)); | |||
995 | error = ENOSPC28; | |||
996 | } else { | |||
997 | mqnode_addref(pn); | |||
998 | sx_xunlock(&mqfs->mi_lock)_sx_xunlock(((&mqfs->mi_lock)), ("/usr/src/sys/modules/mqueue/../../kern/uipc_mqueue.c" ), (998)); | |||
999 | error = mqfs_allocv(ap->a_dvp->v_mount, ap->a_vpp, pn); | |||
1000 | mqnode_release(pn); | |||
1001 | if (error) | |||
1002 | mqfs_destroy(pn); | |||
1003 | else | |||
1004 | pn->mn_data = mq; | |||
1005 | } | |||
1006 | if (error) | |||
1007 | mqueue_free(mq); | |||
1008 | return (error); | |||
1009 | } | |||
1010 | ||||
1011 | /* | |||
1012 | * Remove an entry | |||
1013 | */ | |||
1014 | static | |||
1015 | int do_unlink(struct mqfs_node *pn, struct ucred *ucred) | |||
1016 | { | |||
1017 | struct mqfs_node *parent; | |||
1018 | struct mqfs_vdata *vd; | |||
1019 | int error = 0; | |||
1020 | ||||
1021 | sx_assert(&pn->mn_info->mi_lock, SX_LOCKED)(void)0; | |||
1022 | ||||
1023 | if (ucred->cr_uid != pn->mn_uid && | |||
1024 | (error = priv_check_cred(ucred, PRIV_MQ_ADMIN180, 0)) != 0) | |||
1025 | error = EACCES13; | |||
1026 | else if (!pn->mn_deleted) { | |||
1027 | parent = pn->mn_parent; | |||
1028 | pn->mn_parent = NULL((void *)0); | |||
1029 | pn->mn_deleted = 1; | |||
1030 | LIST_REMOVE(pn, mn_sibling)do { ; ; ; ; if ((((pn))->mn_sibling.le_next) != ((void *) 0)) (((pn))->mn_sibling.le_next)->mn_sibling.le_prev = ( pn)->mn_sibling.le_prev; *(pn)->mn_sibling.le_prev = (( (pn))->mn_sibling.le_next); ; ; } while (0); | |||
1031 | LIST_FOREACH(vd, &pn->mn_vnodes, mv_link)for ((vd) = (((&pn->mn_vnodes))->lh_first); (vd); ( vd) = (((vd))->mv_link.le_next)) { | |||
1032 | cache_purge(vd->mv_vnode); | |||
1033 | vhold(vd->mv_vnode)_vhold((vd->mv_vnode), 0); | |||
1034 | taskqueue_enqueue(taskqueue_thread, &vd->mv_task); | |||
1035 | } | |||
1036 | mqnode_release(pn); | |||
1037 | mqnode_release(parent); | |||
1038 | } else | |||
1039 | error = ENOENT2; | |||
1040 | return (error); | |||
1041 | } | |||
1042 | ||||
1043 | #if 0 | |||
1044 | struct vop_remove_args { | |||
1045 | struct vnode *a_dvp; | |||
1046 | struct vnode *a_vp; | |||
1047 | struct componentname *a_cnp; | |||
1048 | }; | |||
1049 | #endif | |||
1050 | ||||
1051 | /* | |||
1052 | * vnode removal operation | |||
1053 | */ | |||
1054 | static int | |||
1055 | mqfs_remove(struct vop_remove_args *ap) | |||
1056 | { | |||
1057 | struct mqfs_info *mqfs = VFSTOMQFS(ap->a_dvp->v_mount)((struct mqfs_info *)((ap->a_dvp->v_mount)->mnt_data )); | |||
1058 | struct mqfs_node *pn; | |||
1059 | int error; | |||
1060 | ||||
1061 | if (ap->a_vp->v_type == VDIR) | |||
1062 | return (EPERM1); | |||
1063 | pn = VTON(ap->a_vp)(((struct mqfs_vdata *)((ap->a_vp)->v_data))->mv_node ); | |||
1064 | sx_xlock(&mqfs->mi_lock)(void)_sx_xlock(((&mqfs->mi_lock)), 0, ("/usr/src/sys/modules/mqueue/../../kern/uipc_mqueue.c" ), (1064)); | |||
1065 | error = do_unlink(pn, ap->a_cnp->cn_cred); | |||
1066 | sx_xunlock(&mqfs->mi_lock)_sx_xunlock(((&mqfs->mi_lock)), ("/usr/src/sys/modules/mqueue/../../kern/uipc_mqueue.c" ), (1066)); | |||
1067 | return (error); | |||
1068 | } | |||
1069 | ||||
1070 | #if 0 | |||
1071 | struct vop_inactive_args { | |||
1072 | struct vnode *a_vp; | |||
1073 | struct thread *a_td; | |||
1074 | }; | |||
1075 | #endif | |||
1076 | ||||
1077 | static int | |||
1078 | mqfs_inactive(struct vop_inactive_args *ap) | |||
1079 | { | |||
1080 | struct mqfs_node *pn = VTON(ap->a_vp)(((struct mqfs_vdata *)((ap->a_vp)->v_data))->mv_node ); | |||
1081 | ||||
1082 | if (pn->mn_deleted) | |||
1083 | vrecycle(ap->a_vp); | |||
1084 | return (0); | |||
1085 | } | |||
1086 | ||||
1087 | #if 0 | |||
1088 | struct vop_reclaim_args { | |||
1089 | struct vop_generic_args a_gen; | |||
1090 | struct vnode *a_vp; | |||
1091 | struct thread *a_td; | |||
1092 | }; | |||
1093 | #endif | |||
1094 | ||||
1095 | static int | |||
1096 | mqfs_reclaim(struct vop_reclaim_args *ap) | |||
1097 | { | |||
1098 | struct mqfs_info *mqfs = VFSTOMQFS(ap->a_vp->v_mount)((struct mqfs_info *)((ap->a_vp->v_mount)->mnt_data) ); | |||
1099 | struct vnode *vp = ap->a_vp; | |||
1100 | struct mqfs_node *pn; | |||
1101 | struct mqfs_vdata *vd; | |||
1102 | ||||
1103 | vd = vp->v_data; | |||
1104 | pn = vd->mv_node; | |||
1105 | sx_xlock(&mqfs->mi_lock)(void)_sx_xlock(((&mqfs->mi_lock)), 0, ("/usr/src/sys/modules/mqueue/../../kern/uipc_mqueue.c" ), (1105)); | |||
1106 | vp->v_data = NULL((void *)0); | |||
1107 | LIST_REMOVE(vd, mv_link)do { ; ; ; ; if ((((vd))->mv_link.le_next) != ((void *)0)) (((vd))->mv_link.le_next)->mv_link.le_prev = (vd)-> mv_link.le_prev; *(vd)->mv_link.le_prev = (((vd))->mv_link .le_next); ; ; } while (0); | |||
1108 | uma_zfree(mvdata_zone, vd); | |||
1109 | mqnode_release(pn); | |||
1110 | sx_xunlock(&mqfs->mi_lock)_sx_xunlock(((&mqfs->mi_lock)), ("/usr/src/sys/modules/mqueue/../../kern/uipc_mqueue.c" ), (1110)); | |||
1111 | return (0); | |||
1112 | } | |||
1113 | ||||
1114 | #if 0 | |||
1115 | struct vop_open_args { | |||
1116 | struct vop_generic_args a_gen; | |||
1117 | struct vnode *a_vp; | |||
1118 | int a_mode; | |||
1119 | struct ucred *a_cred; | |||
1120 | struct thread *a_td; | |||
1121 | struct file *a_fp; | |||
1122 | }; | |||
1123 | #endif | |||
1124 | ||||
1125 | static int | |||
1126 | mqfs_open(struct vop_open_args *ap) | |||
1127 | { | |||
1128 | return (0); | |||
1129 | } | |||
1130 | ||||
1131 | #if 0 | |||
1132 | struct vop_close_args { | |||
1133 | struct vop_generic_args a_gen; | |||
1134 | struct vnode *a_vp; | |||
1135 | int a_fflag; | |||
1136 | struct ucred *a_cred; | |||
1137 | struct thread *a_td; | |||
1138 | }; | |||
1139 | #endif | |||
1140 | ||||
1141 | static int | |||
1142 | mqfs_close(struct vop_close_args *ap) | |||
1143 | { | |||
1144 | return (0); | |||
1145 | } | |||
1146 | ||||
1147 | #if 0 | |||
1148 | struct vop_access_args { | |||
1149 | struct vop_generic_args a_gen; | |||
1150 | struct vnode *a_vp; | |||
1151 | accmode_t a_accmode; | |||
1152 | struct ucred *a_cred; | |||
1153 | struct thread *a_td; | |||
1154 | }; | |||
1155 | #endif | |||
1156 | ||||
1157 | /* | |||
1158 | * Verify permissions | |||
1159 | */ | |||
1160 | static int | |||
1161 | mqfs_access(struct vop_access_args *ap) | |||
1162 | { | |||
1163 | struct vnode *vp = ap->a_vp; | |||
1164 | struct vattr vattr; | |||
1165 | int error; | |||
1166 | ||||
1167 | error = VOP_GETATTR(vp, &vattr, ap->a_cred); | |||
1168 | if (error) | |||
1169 | return (error); | |||
1170 | error = vaccess(vp->v_type, vattr.va_mode, vattr.va_uid, | |||
1171 | vattr.va_gid, ap->a_accmode, ap->a_cred, NULL); | |||
1172 | return (error); | |||
1173 | } | |||
1174 | ||||
1175 | #if 0 | |||
1176 | struct vop_getattr_args { | |||
1177 | struct vop_generic_args a_gen; | |||
1178 | struct vnode *a_vp; | |||
1179 | struct vattr *a_vap; | |||
1180 | struct ucred *a_cred; | |||
1181 | }; | |||
1182 | #endif | |||
1183 | ||||
1184 | /* | |||
1185 | * Get file attributes | |||
1186 | */ | |||
1187 | static int | |||
1188 | mqfs_getattr(struct vop_getattr_args *ap) | |||
1189 | { | |||
1190 | struct vnode *vp = ap->a_vp; | |||
1191 | struct mqfs_node *pn = VTON(vp); | |||
1192 | struct vattr *vap = ap->a_vap; | |||
1193 | int error = 0; | |||
1194 | ||||
1195 | vap->va_type = vp->v_type; | |||
1196 | vap->va_mode = pn->mn_mode; | |||
1197 | vap->va_nlink = 1; | |||
1198 | vap->va_uid = pn->mn_uid; | |||
1199 | vap->va_gid = pn->mn_gid; | |||
1200 | vap->va_fsid = vp->v_mount->mnt_stat.f_fsid.val[0]; | |||
1201 | vap->va_fileid = pn->mn_fileno; | |||
1202 | vap->va_size = 0; | |||
1203 | vap->va_blocksize = PAGE_SIZE; | |||
1204 | vap->va_bytes = vap->va_size = 0; | |||
1205 | vap->va_atime = pn->mn_atime; | |||
1206 | vap->va_mtime = pn->mn_mtime; | |||
1207 | vap->va_ctime = pn->mn_ctime; | |||
1208 | vap->va_birthtime = pn->mn_birth; | |||
1209 | vap->va_gen = 0; | |||
1210 | vap->va_flags = 0; | |||
1211 | vap->va_rdev = NODEV; | |||
1212 | vap->va_bytes = 0; | |||
1213 | vap->va_filerev = 0; | |||
1214 | return (error); | |||
1215 | } | |||
1216 | ||||
1217 | #if 0 | |||
1218 | struct vop_setattr_args { | |||
1219 | struct vop_generic_args a_gen; | |||
1220 | struct vnode *a_vp; | |||
1221 | struct vattr *a_vap; | |||
1222 | struct ucred *a_cred; | |||
1223 | }; | |||
1224 | #endif | |||
1225 | /* | |||
1226 | * Set attributes | |||
1227 | */ | |||
1228 | static int | |||
1229 | mqfs_setattr(struct vop_setattr_args *ap) | |||
1230 | { | |||
1231 | struct mqfs_node *pn; | |||
1232 | struct vattr *vap; | |||
1233 | struct vnode *vp; | |||
1234 | struct thread *td; | |||
1235 | int c, error; | |||
1236 | uid_t uid; | |||
1237 | gid_t gid; | |||
1238 | ||||
1239 | td = curthread; | |||
1240 | vap = ap->a_vap; | |||
1241 | vp = ap->a_vp; | |||
1242 | if ((vap->va_type != VNON) || | |||
1243 | (vap->va_nlink != VNOVAL) || | |||
1244 | (vap->va_fsid != VNOVAL) || | |||
1245 | (vap->va_fileid != VNOVAL) || | |||
1246 | (vap->va_blocksize != VNOVAL) || | |||
1247 | (vap->va_flags != VNOVAL && vap->va_flags != 0) || | |||
1248 | (vap->va_rdev != VNOVAL) || | |||
1249 | ((int)vap->va_bytes != VNOVAL) || | |||
1250 | (vap->va_gen != VNOVAL)) { | |||
1251 | return (EINVAL); | |||
1252 | } | |||
1253 | ||||
1254 | pn = VTON(vp); | |||
1255 | ||||
1256 | error = c = 0; | |||
1257 | if (vap->va_uid == (uid_t)VNOVAL) | |||
1258 | uid = pn->mn_uid; | |||
1259 | else | |||
1260 | uid = vap->va_uid; | |||
1261 | if (vap->va_gid == (gid_t)VNOVAL) | |||
1262 | gid = pn->mn_gid; | |||
1263 | else | |||
1264 | gid = vap->va_gid; | |||
1265 | ||||
1266 | if (uid != pn->mn_uid || gid != pn->mn_gid) { | |||
1267 | /* | |||
1268 | * To modify the ownership of a file, must possess VADMIN | |||
1269 | * for that file. | |||
1270 | */ | |||
1271 | if ((error = VOP_ACCESS(vp, VADMIN, ap->a_cred, td))) | |||
1272 | return (error); | |||
1273 | ||||
1274 | /* | |||
1275 | * XXXRW: Why is there a privilege check here: shouldn't the | |||
1276 | * check in VOP_ACCESS() be enough? Also, are the group bits | |||
1277 | * below definitely right? | |||
1278 | */ | |||
1279 | if (((ap->a_cred->cr_uid != pn->mn_uid) || uid != pn->mn_uid || | |||
1280 | (gid != pn->mn_gid && !groupmember(gid, ap->a_cred))) && | |||
1281 | (error = priv_check(td, PRIV_MQ_ADMIN)) != 0) | |||
1282 | return (error); | |||
1283 | pn->mn_uid = uid; | |||
1284 | pn->mn_gid = gid; | |||
1285 | c = 1; | |||
1286 | } | |||
1287 | ||||
1288 | if (vap->va_mode != (mode_t)VNOVAL) { | |||
1289 | if ((ap->a_cred->cr_uid != pn->mn_uid) && | |||
1290 | (error = priv_check(td, PRIV_MQ_ADMIN))) | |||
1291 | return (error); | |||
1292 | pn->mn_mode = vap->va_mode; | |||
1293 | c = 1; | |||
1294 | } | |||
1295 | ||||
1296 | if (vap->va_atime.tv_sec != VNOVAL || vap->va_mtime.tv_sec != VNOVAL) { | |||
1297 | /* See the comment in ufs_vnops::ufs_setattr(). */ | |||
1298 | if ((error = VOP_ACCESS(vp, VADMIN, ap->a_cred, td)) && | |||
1299 | ((vap->va_vaflags & VA_UTIMES_NULL) == 0 || | |||
1300 | (error = VOP_ACCESS(vp, VWRITE, ap->a_cred, td)))) | |||
1301 | return (error); | |||
1302 | if (vap->va_atime.tv_sec != VNOVAL) { | |||
1303 | pn->mn_atime = vap->va_atime; | |||
1304 | } | |||
1305 | if (vap->va_mtime.tv_sec != VNOVAL) { | |||
1306 | pn->mn_mtime = vap->va_mtime; | |||
1307 | } | |||
1308 | c = 1; | |||
1309 | } | |||
1310 | if (c) { | |||
1311 | vfs_timestamp(&pn->mn_ctime); | |||
1312 | } | |||
1313 | return (0); | |||
1314 | } | |||
1315 | ||||
1316 | #if 0 | |||
1317 | struct vop_read_args { | |||
1318 | struct vop_generic_args a_gen; | |||
1319 | struct vnode *a_vp; | |||
1320 | struct uio *a_uio; | |||
1321 | int a_ioflag; | |||
1322 | struct ucred *a_cred; | |||
1323 | }; | |||
1324 | #endif | |||
1325 | ||||
1326 | /* | |||
1327 | * Read from a file | |||
1328 | */ | |||
1329 | static int | |||
1330 | mqfs_read(struct vop_read_args *ap) | |||
1331 | { | |||
1332 | char buf[80]; | |||
1333 | struct vnode *vp = ap->a_vp; | |||
1334 | struct uio *uio = ap->a_uio; | |||
1335 | struct mqfs_node *pn; | |||
1336 | struct mqueue *mq; | |||
1337 | int len, error; | |||
1338 | ||||
1339 | if (vp->v_type != VREG) | |||
1340 | return (EINVAL); | |||
1341 | ||||
1342 | pn = VTON(vp); | |||
1343 | mq = VTOMQ(vp); | |||
1344 | snprintf(buf, sizeof(buf), | |||
1345 | "QSIZE:%-10ld MAXMSG:%-10ld CURMSG:%-10ld MSGSIZE:%-10ld\n", | |||
1346 | mq->mq_totalbytes, | |||
1347 | mq->mq_maxmsg, | |||
1348 | mq->mq_curmsgs, | |||
1349 | mq->mq_msgsize); | |||
1350 | buf[sizeof(buf)-1] = '\0'; | |||
1351 | len = strlen(buf); | |||
1352 | error = uiomove_frombuf(buf, len, uio); | |||
1353 | return (error); | |||
1354 | } | |||
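/*
 * Illustrative sketch (not part of the kernel source): what the status line
 * produced by mqfs_read() above looks like from userspace.  It assumes the
 * mqueue filesystem is mounted on /mnt/mqueue and that a queue "/myqueue"
 * already exists; both the mount point and the queue name are assumptions.
 */
#if 0
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(void)
{
	char buf[81];
	ssize_t n;
	int fd;

	/* Each queue appears as a regular file in the mounted directory. */
	fd = open("/mnt/mqueue/myqueue", O_RDONLY);
	if (fd == -1) {
		perror("open");
		return (1);
	}
	/* Reads back one line: "QSIZE:... MAXMSG:... CURMSG:... MSGSIZE:..." */
	n = read(fd, buf, sizeof(buf) - 1);
	if (n > 0) {
		buf[n] = '\0';
		fputs(buf, stdout);
	}
	close(fd);
	return (0);
}
#endif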
1355 | ||||
1356 | #if 0 | |||
1357 | struct vop_readdir_args { | |||
1358 | struct vop_generic_args a_gen; | |||
1359 | struct vnode *a_vp; | |||
1360 | struct uio *a_uio; | |||
1361 | struct ucred *a_cred; | |||
1362 | int *a_eofflag; | |||
1363 | int *a_ncookies; | |||
1364 | u_long **a_cookies; | |||
1365 | }; | |||
1366 | #endif | |||
1367 | ||||
1368 | /* | |||
1369 | * Return directory entries. | |||
1370 | */ | |||
1371 | static int | |||
1372 | mqfs_readdir(struct vop_readdir_args *ap) | |||
1373 | { | |||
1374 | struct vnode *vp; | |||
1375 | struct mqfs_info *mi; | |||
1376 | struct mqfs_node *pd; | |||
1377 | struct mqfs_node *pn; | |||
1378 | struct dirent entry; | |||
1379 | struct uio *uio; | |||
1380 | const void *pr_root; | |||
1381 | int *tmp_ncookies = NULL; | |||
1382 | off_t offset; | |||
1383 | int error, i; | |||
1384 | ||||
1385 | vp = ap->a_vp; | |||
1386 | mi = VFSTOMQFS(vp->v_mount); | |||
1387 | pd = VTON(vp); | |||
1388 | uio = ap->a_uio; | |||
1389 | ||||
1390 | if (vp->v_type != VDIR) | |||
1391 | return (ENOTDIR); | |||
1392 | ||||
1393 | if (uio->uio_offset < 0) | |||
1394 | return (EINVAL); | |||
1395 | ||||
1396 | if (ap->a_ncookies != NULL) { | |||
1397 | tmp_ncookies = ap->a_ncookies; | |||
1398 | *ap->a_ncookies = 0; | |||
1399 | ap->a_ncookies = NULL; | |||
1400 | } | |||
1401 | ||||
1402 | error = 0; | |||
1403 | offset = 0; | |||
1404 | ||||
1405 | pr_root = ap->a_cred->cr_prison->pr_root; | |||
1406 | sx_xlock(&mi->mi_lock); | |||
1407 | ||||
1408 | LIST_FOREACH(pn, &pd->mn_children, mn_sibling) { | |||
1409 | entry.d_reclen = sizeof(entry); | |||
1410 | ||||
1411 | /* | |||
1412 | * Only show names within the same prison root directory | |||
1413 | * (or not associated with a prison, e.g. "." and ".."). | |||
1414 | */ | |||
1415 | if (pn->mn_pr_root != NULL && pn->mn_pr_root != pr_root) | |||
1416 | continue; | |||
1417 | if (!pn->mn_fileno) | |||
1418 | mqfs_fileno_alloc(mi, pn); | |||
1419 | entry.d_fileno = pn->mn_fileno; | |||
1420 | for (i = 0; i < MQFS_NAMELEN - 1 && pn->mn_name[i] != '\0'; ++i) | |||
1421 | entry.d_name[i] = pn->mn_name[i]; | |||
1422 | entry.d_name[i] = 0; | |||
1423 | entry.d_namlen = i; | |||
1424 | switch (pn->mn_type) { | |||
1425 | case mqfstype_root: | |||
1426 | case mqfstype_dir: | |||
1427 | case mqfstype_this: | |||
1428 | case mqfstype_parent: | |||
1429 | entry.d_type = DT_DIR; | |||
1430 | break; | |||
1431 | case mqfstype_file: | |||
1432 | entry.d_type = DT_REG; | |||
1433 | break; | |||
1434 | case mqfstype_symlink: | |||
1435 | entry.d_type = DT_LNK; | |||
1436 | break; | |||
1437 | default: | |||
1438 | panic("%s has unexpected node type: %d", pn->mn_name, | |||
1439 | pn->mn_type); | |||
1440 | } | |||
1441 | if (entry.d_reclen > uio->uio_resid) | |||
1442 | break; | |||
1443 | if (offset >= uio->uio_offset) { | |||
1444 | error = vfs_read_dirent(ap, &entry, offset); | |||
1445 | if (error) | |||
1446 | break; | |||
1447 | } | |||
1448 | offset += entry.d_reclen; | |||
1449 | } | |||
1450 | sx_xunlock(&mi->mi_lock); | |||
1451 | ||||
1452 | uio->uio_offset = offset; | |||
1453 | ||||
1454 | if (tmp_ncookies != NULL) | |||
1455 | ap->a_ncookies = tmp_ncookies; | |||
1456 | ||||
1457 | return (error); | |||
1458 | } | |||
1459 | ||||
1460 | #ifdef notyet | |||
1461 | ||||
1462 | #if 0 | |||
1463 | struct vop_mkdir_args { | |||
1464 | struct vnode *a_dvp; | |||
1465 | struct vnode **a_vpp; | |||
1466 | struct componentname *a_cnp; | |||
1467 | struct vattr *a_vap; | |||
1468 | }; | |||
1469 | #endif | |||
1470 | ||||
1471 | /* | |||
1472 | * Create a directory. | |||
1473 | */ | |||
1474 | static int | |||
1475 | mqfs_mkdir(struct vop_mkdir_args *ap) | |||
1476 | { | |||
1477 | struct mqfs_info *mqfs = VFSTOMQFS(ap->a_dvp->v_mount); | |||
1478 | struct componentname *cnp = ap->a_cnp; | |||
1479 | struct mqfs_node *pd = VTON(ap->a_dvp); | |||
1480 | struct mqfs_node *pn; | |||
1481 | int error; | |||
1482 | ||||
1483 | if (pd->mn_type != mqfstype_root && pd->mn_type != mqfstype_dir) | |||
1484 | return (ENOTDIR); | |||
1485 | sx_xlock(&mqfs->mi_lock); | |||
1486 | if ((cnp->cn_flags & HASBUF) == 0) | |||
1487 | panic("%s: no name", __func__); | |||
1488 | pn = mqfs_create_dir(pd, cnp->cn_nameptr, cnp->cn_namelen, | |||
1489 | cnp->cn_cred, ap->a_vap->va_mode); | |||
1490 | if (pn != NULL) | |||
1491 | mqnode_addref(pn); | |||
1492 | sx_xunlock(&mqfs->mi_lock); | |||
1493 | if (pn == NULL) { | |||
1494 | error = ENOSPC; | |||
1495 | } else { | |||
1496 | error = mqfs_allocv(ap->a_dvp->v_mount, ap->a_vpp, pn); | |||
1497 | mqnode_release(pn); | |||
1498 | } | |||
1499 | return (error); | |||
1500 | } | |||
1501 | ||||
1502 | #if 0 | |||
1503 | struct vop_rmdir_args { | |||
1504 | struct vnode *a_dvp; | |||
1505 | struct vnode *a_vp; | |||
1506 | struct componentname *a_cnp; | |||
1507 | }; | |||
1508 | #endif | |||
1509 | ||||
1510 | /* | |||
1511 | * Remove a directory. | |||
1512 | */ | |||
1513 | static int | |||
1514 | mqfs_rmdir(struct vop_rmdir_args *ap) | |||
1515 | { | |||
1516 | struct mqfs_info *mqfs = VFSTOMQFS(ap->a_dvp->v_mount); | |||
1517 | struct mqfs_node *pn = VTON(ap->a_vp); | |||
1518 | struct mqfs_node *pt; | |||
1519 | ||||
1520 | if (pn->mn_type != mqfstype_dir) | |||
1521 | return (ENOTDIR); | |||
1522 | ||||
1523 | sx_xlock(&mqfs->mi_lock); | |||
1524 | if (pn->mn_deleted) { | |||
1525 | sx_xunlock(&mqfs->mi_lock); | |||
1526 | return (ENOENT); | |||
1527 | } | |||
1528 | ||||
1529 | pt = LIST_FIRST(&pn->mn_children); | |||
1530 | pt = LIST_NEXT(pt, mn_sibling); | |||
1531 | pt = LIST_NEXT(pt, mn_sibling); | |||
1532 | if (pt != NULL) { | |||
1533 | sx_xunlock(&mqfs->mi_lock); | |||
1534 | return (ENOTEMPTY); | |||
1535 | } | |||
1536 | pt = pn->mn_parent; | |||
1537 | pn->mn_parent = NULL; | |||
1538 | pn->mn_deleted = 1; | |||
1539 | LIST_REMOVE(pn, mn_sibling); | |||
1540 | mqnode_release(pn); | |||
1541 | mqnode_release(pt); | |||
1542 | sx_xunlock(&mqfs->mi_lock); | |||
1543 | cache_purge(ap->a_vp); | |||
1544 | return (0); | |||
1545 | } | |||
1546 | ||||
1547 | #endif /* notyet */ | |||
1548 | ||||
1549 | /* | |||
1550 | * See if this prison root is obsolete, and clean up associated queues if it is. | |||
1551 | */ | |||
1552 | static int | |||
1553 | mqfs_prison_remove(void *obj, void *data __unused) | |||
1554 | { | |||
1555 | const struct prison *pr = obj; | |||
1556 | const struct prison *tpr; | |||
1557 | struct mqfs_node *pn, *tpn; | |||
1558 | int found; | |||
1559 | ||||
1560 | found = 0; | |||
1561 | TAILQ_FOREACH(tpr, &allprison, pr_list) { | |||
1562 | if (tpr->pr_root == pr->pr_root && tpr != pr && tpr->pr_ref > 0) | |||
1563 | found = 1; | |||
1564 | } | |||
1565 | if (!found) { | |||
1566 | /* | |||
1567 | * No jails are rooted in this directory anymore, | |||
1568 | * so no queues should be either. | |||
1569 | */ | |||
1570 | sx_xlock(&mqfs_data.mi_lock); | |||
1571 | LIST_FOREACH_SAFE(pn, &mqfs_data.mi_root->mn_children, | |||
1572 | mn_sibling, tpn) { | |||
1573 | if (pn->mn_pr_root == pr->pr_root) | |||
1574 | (void)do_unlink(pn, curthread->td_ucred); | |||
1575 | } | |||
1576 | sx_xunlock(&mqfs_data.mi_lock); | |||
1577 | } | |||
1578 | return (0); | |||
1579 | } | |||
1580 | ||||
1581 | /* | |||
1582 | * Allocate a message queue | |||
1583 | */ | |||
1584 | static struct mqueue * | |||
1585 | mqueue_alloc(const struct mq_attr *attr) | |||
1586 | { | |||
1587 | struct mqueue *mq; | |||
1588 | ||||
1589 | if (curmq >= maxmq) | |||
1590 | return (NULL); | |||
1591 | mq = uma_zalloc(mqueue_zone, M_WAITOK | M_ZERO); | |||
1592 | TAILQ_INIT(&mq->mq_msgq); | |||
1593 | if (attr != NULL) { | |||
1594 | mq->mq_maxmsg = attr->mq_maxmsg; | |||
1595 | mq->mq_msgsize = attr->mq_msgsize; | |||
1596 | } else { | |||
1597 | mq->mq_maxmsg = default_maxmsg; | |||
1598 | mq->mq_msgsize = default_msgsize; | |||
1599 | } | |||
1600 | mtx_init(&mq->mq_mutex, "mqueue lock", NULL, MTX_DEF); | |||
1601 | knlist_init_mtx(&mq->mq_rsel.si_note, &mq->mq_mutex); | |||
1602 | knlist_init_mtx(&mq->mq_wsel.si_note, &mq->mq_mutex); | |||
1603 | atomic_add_int(&curmq, 1); | |||
1604 | return (mq); | |||
1605 | } | |||
1606 | ||||
1607 | /* | |||
1608 | * Destroy a message queue | |||
1609 | */ | |||
1610 | static void | |||
1611 | mqueue_free(struct mqueue *mq) | |||
1612 | { | |||
1613 | struct mqueue_msg *msg; | |||
1614 | ||||
1615 | while ((msg = TAILQ_FIRST(&mq->mq_msgq)) != NULL) { | |||
1616 | TAILQ_REMOVE(&mq->mq_msgq, msg, msg_link); | |||
1617 | free(msg, M_MQUEUEDATA); | |||
1618 | } | |||
1619 | ||||
1620 | mtx_destroy(&mq->mq_mutex); | |||
1621 | seldrain(&mq->mq_rsel); | |||
1622 | seldrain(&mq->mq_wsel); | |||
1623 | knlist_destroy(&mq->mq_rsel.si_note); | |||
1624 | knlist_destroy(&mq->mq_wsel.si_note); | |||
1625 | uma_zfree(mqueue_zone, mq); | |||
1626 | atomic_add_int(&curmq, -1); | |||
1627 | } | |||
1628 | ||||
1629 | /* | |||
1630 | * Load a message from user space | |||
1631 | */ | |||
1632 | static struct mqueue_msg * | |||
1633 | mqueue_loadmsg(const char *msg_ptr, size_t msg_size, int msg_prio) | |||
1634 | { | |||
1635 | struct mqueue_msg *msg; | |||
1636 | size_t len; | |||
1637 | int error; | |||
1638 | ||||
1639 | len = sizeof(struct mqueue_msg) + msg_size; | |||
1640 | msg = malloc(len, M_MQUEUEDATA, M_WAITOK); | |||
1641 | error = copyin(msg_ptr, ((char *)msg) + sizeof(struct mqueue_msg), | |||
1642 | msg_size); | |||
1643 | if (error) { | |||
1644 | free(msg, M_MQUEUEDATA); | |||
1645 | msg = NULL; | |||
1646 | } else { | |||
1647 | msg->msg_size = msg_size; | |||
1648 | msg->msg_prio = msg_prio; | |||
1649 | } | |||
1650 | return (msg); | |||
1651 | } | |||
1652 | ||||
1653 | /* | |||
1654 | * Save a message to user space | |||
1655 | */ | |||
1656 | static int | |||
1657 | mqueue_savemsg(struct mqueue_msg *msg, char *msg_ptr, int *msg_prio) | |||
1658 | { | |||
1659 | int error; | |||
1660 | ||||
1661 | error = copyout(((char *)msg) + sizeof(*msg), msg_ptr, | |||
1662 | msg->msg_size); | |||
1663 | if (error == 0 && msg_prio != NULL) | |||
1664 | error = copyout(&msg->msg_prio, msg_prio, sizeof(int)); | |||
1665 | return (error); | |||
1666 | } | |||
1667 | ||||
1668 | /* | |||
1669 | * Free a message's memory | |||
1670 | */ | |||
1671 | static __inline void | |||
1672 | mqueue_freemsg(struct mqueue_msg *msg) | |||
1673 | { | |||
1674 | free(msg, M_MQUEUEDATA); | |||
1675 | } | |||
1676 | ||||
1677 | /* | |||
1678 | * Send a message.  If waitok is false, the thread is not blocked | |||
1679 | * when the queue is full; otherwise the absolute timeout is | |||
1680 | * checked while waiting for space. | |||
1681 | */ | |||
1682 | int | |||
1683 | mqueue_send(struct mqueue *mq, const char *msg_ptr, | |||
1684 | size_t msg_len, unsigned msg_prio, int waitok, | |||
1685 | const struct timespec *abs_timeout) | |||
1686 | { | |||
1687 | struct mqueue_msg *msg; | |||
1688 | struct timespec ts, ts2; | |||
1689 | struct timeval tv; | |||
1690 | int error; | |||
1691 | ||||
1692 | if (msg_prio >= MQ_PRIO_MAX) | |||
1693 | return (EINVAL); | |||
1694 | if (msg_len > mq->mq_msgsize) | |||
1695 | return (EMSGSIZE); | |||
1696 | msg = mqueue_loadmsg(msg_ptr, msg_len, msg_prio); | |||
1697 | if (msg == NULL) | |||
1698 | return (EFAULT); | |||
1699 | ||||
1700 | /* O_NONBLOCK case */ | |||
1701 | if (!waitok) { | |||
1702 | error = _mqueue_send(mq, msg, -1); | |||
1703 | if (error) | |||
1704 | goto bad; | |||
1705 | return (0); | |||
1706 | } | |||
1707 | ||||
1708 | /* we allow a null timeout (wait forever) */ | |||
1709 | if (abs_timeout == NULL) { | |||
1710 | error = _mqueue_send(mq, msg, 0); | |||
1711 | if (error) | |||
1712 | goto bad; | |||
1713 | return (0); | |||
1714 | } | |||
1715 | ||||
1716 | /* send it before checking time */ | |||
1717 | error = _mqueue_send(mq, msg, -1); | |||
1718 | if (error == 0) | |||
1719 | return (0); | |||
1720 | ||||
1721 | if (error != EAGAIN) | |||
1722 | goto bad; | |||
1723 | ||||
1724 | if (abs_timeout->tv_nsec >= 1000000000 || abs_timeout->tv_nsec < 0) { | |||
1725 | error = EINVAL; | |||
1726 | goto bad; | |||
1727 | } | |||
1728 | for (;;) { | |||
1729 | ts2 = *abs_timeout; | |||
1730 | getnanotime(&ts); | |||
1731 | timespecsub(&ts2, &ts); | |||
1732 | if (ts2.tv_sec < 0 || (ts2.tv_sec == 0 && ts2.tv_nsec <= 0)) { | |||
1733 | error = ETIMEDOUT; | |||
1734 | break; | |||
1735 | } | |||
1736 | TIMESPEC_TO_TIMEVAL(&tv, &ts2); | |||
1737 | error = _mqueue_send(mq, msg, tvtohz(&tv)); | |||
1738 | if (error != ETIMEDOUT) | |||
1739 | break; | |||
1739 | break; | |||
1740 | } | |||
1741 | if (error == 0) | |||
1742 | return (0); | |||
1743 | bad: | |||
1744 | mqueue_freemsg(msg); | |||
1745 | return (error); | |||
1746 | } | |||
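/*
 * Illustrative sketch (not part of the kernel source): sending with an
 * absolute timeout from userspace.  The kernel above rechecks the absolute
 * deadline against getnanotime() on every wakeup, so the caller passes a
 * CLOCK_REALTIME-based timespec.  The queue name, mode, and two-second
 * deadline are assumptions made for the example.
 */
#if 0
#include <fcntl.h>
#include <mqueue.h>
#include <stdio.h>
#include <string.h>
#include <time.h>

int
main(void)
{
	struct timespec abstime;
	const char msg[] = "hello";
	mqd_t mqd;

	mqd = mq_open("/myqueue", O_WRONLY | O_CREAT, 0600, NULL);
	if (mqd == (mqd_t)-1) {
		perror("mq_open");
		return (1);
	}
	/* Deadline two seconds from now; tv_nsec must stay in [0, 1e9). */
	clock_gettime(CLOCK_REALTIME, &abstime);
	abstime.tv_sec += 2;
	if (mq_timedsend(mqd, msg, strlen(msg), 1, &abstime) == -1)
		perror("mq_timedsend");	/* ETIMEDOUT if the queue stayed full */
	mq_close(mqd);
	return (0);
}
#endif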
1747 | ||||
1748 | /* | |||
1749 | * Common routine to send a message | |||
1750 | */ | |||
1751 | static int | |||
1752 | _mqueue_send(struct mqueue *mq, struct mqueue_msg *msg, int timo) | |||
1753 | { | |||
1754 | struct mqueue_msg *msg2; | |||
1755 | int error = 0; | |||
1756 | ||||
1757 | mtx_lock(&mq->mq_mutex); | |||
1758 | while (mq->mq_curmsgs >= mq->mq_maxmsg && error == 0) { | |||
1759 | if (timo < 0) { | |||
1760 | mtx_unlock(&mq->mq_mutex); | |||
1761 | return (EAGAIN); | |||
1762 | } | |||
1763 | mq->mq_senders++; | |||
1764 | error = msleep(&mq->mq_senders, &mq->mq_mutex, | |||
1765 | PCATCH, "mqsend", timo); | |||
1766 | mq->mq_senders--; | |||
1767 | if (error == EAGAIN) | |||
1768 | error = ETIMEDOUT; | |||
1769 | } | |||
1770 | if (mq->mq_curmsgs >= mq->mq_maxmsg) { | |||
1771 | mtx_unlock(&mq->mq_mutex); | |||
1772 | return (error); | |||
1773 | } | |||
1774 | error = 0; | |||
1775 | if (TAILQ_EMPTY(&mq->mq_msgq)) { | |||
1776 | TAILQ_INSERT_HEAD(&mq->mq_msgq, msg, msg_link); | |||
1777 | } else { | |||
1778 | if (msg->msg_prio <= TAILQ_LAST(&mq->mq_msgq, msgq)->msg_prio) { | |||
1779 | TAILQ_INSERT_TAIL(&mq->mq_msgq, msg, msg_link); | |||
1780 | } else { | |||
1781 | TAILQ_FOREACH(msg2, &mq->mq_msgq, msg_link) { | |||
1782 | if (msg2->msg_prio < msg->msg_prio) | |||
1783 | break; | |||
1784 | } | |||
1785 | TAILQ_INSERT_BEFORE(msg2, msg, msg_link); | |||
1786 | } | |||
1787 | } | |||
1788 | mq->mq_curmsgs++; | |||
1789 | mq->mq_totalbytes += msg->msg_size; | |||
1790 | if (mq->mq_receivers) | |||
1791 | wakeup_one(&mq->mq_receivers); | |||
1792 | else if (mq->mq_notifier != NULL) | |||
1793 | mqueue_send_notification(mq); | |||
1794 | if (mq->mq_flags & MQ_RSEL) { | |||
1795 | mq->mq_flags &= ~MQ_RSEL; | |||
1796 | selwakeup(&mq->mq_rsel); | |||
1797 | } | |||
1798 | KNOTE_LOCKED(&mq->mq_rsel.si_note, 0); | |||
1799 | mtx_unlock(&mq->mq_mutex); | |||
1800 | return (0); | |||
1801 | } | |||
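/*
 * Illustrative sketch (not part of the kernel source): the insertion order
 * implemented above means a receiver always gets the oldest message of the
 * highest priority.  The queue name is an assumption, and the receive buffer
 * is simply sized comfortably past the kernel's default mq_msgsize (also an
 * assumption; query mq_getattr() to be exact).
 */
#if 0
#include <fcntl.h>
#include <mqueue.h>
#include <stdio.h>

int
main(void)
{
	char buf[8192];
	unsigned prio;
	mqd_t mqd;

	mqd = mq_open("/prioq", O_RDWR | O_CREAT, 0600, NULL);
	if (mqd == (mqd_t)-1) {
		perror("mq_open");
		return (1);
	}
	mq_send(mqd, "low", 3, 1);	/* queued behind ... */
	mq_send(mqd, "high", 4, 5);	/* ... but delivered first */
	mq_receive(mqd, buf, sizeof(buf), &prio);
	printf("first message has priority %u\n", prio);	/* prints 5 */
	mq_close(mqd);
	return (0);
}
#endif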
1802 | ||||
1803 | /* | |||
1804 | * Send a realtime signal to the process that registered itself | |||
1805 | * successfully via mq_notify. | |||
1806 | */ | |||
1807 | static void | |||
1808 | mqueue_send_notification(struct mqueue *mq) | |||
1809 | { | |||
1810 | struct mqueue_notifier *nt; | |||
1811 | struct thread *td; | |||
1812 | struct proc *p; | |||
1813 | int error; | |||
1814 | ||||
1815 | mtx_assert(&mq->mq_mutex, MA_OWNED); | |||
1816 | nt = mq->mq_notifier; | |||
1817 | if (nt->nt_sigev.sigev_notify != SIGEV_NONE) { | |||
1818 | p = nt->nt_proc; | |||
1819 | error = sigev_findtd(p, &nt->nt_sigev, &td); | |||
1820 | if (error) { | |||
1821 | mq->mq_notifier = NULL; | |||
1822 | return; | |||
1823 | } | |||
1824 | if (!KSI_ONQ(&nt->nt_ksi)) { | |||
1825 | ksiginfo_set_sigev(&nt->nt_ksi, &nt->nt_sigev); | |||
1826 | tdsendsignal(p, td, nt->nt_ksi.ksi_signo, &nt->nt_ksi); | |||
1827 | } | |||
1828 | PROC_UNLOCK(p); | |||
1829 | } | |||
1830 | mq->mq_notifier = NULL; | |||
1831 | } | |||
1832 | ||||
1833 | /* | |||
1834 | * Get a message.  If waitok is false, the thread is not blocked | |||
1835 | * when the queue is empty; otherwise the absolute timeout is | |||
1836 | * checked while waiting for a message. | |||
1837 | */ | |||
1838 | int | |||
1839 | mqueue_receive(struct mqueue *mq, char *msg_ptr, | |||
1840 | size_t msg_len, unsigned *msg_prio, int waitok, | |||
1841 | const struct timespec *abs_timeout) | |||
1842 | { | |||
1843 | struct mqueue_msg *msg; | |||
1844 | struct timespec ts, ts2; | |||
1845 | struct timeval tv; | |||
1846 | int error; | |||
1847 | ||||
1848 | if (msg_len < mq->mq_msgsize) | |||
1849 | return (EMSGSIZE); | |||
1850 | ||||
1851 | /* O_NONBLOCK case */ | |||
1852 | if (!waitok) { | |||
1853 | error = _mqueue_recv(mq, &msg, -1); | |||
1854 | if (error) | |||
1855 | return (error); | |||
1856 | goto received; | |||
1857 | } | |||
1858 | ||||
1859 | /* we allow a null timeout (wait forever). */ | |||
1860 | if (abs_timeout == NULL) { | |||
1861 | error = _mqueue_recv(mq, &msg, 0); | |||
1862 | if (error) | |||
1863 | return (error); | |||
1864 | goto received; | |||
1865 | } | |||
1866 | ||||
1867 | /* try to get a message before checking time */ | |||
1868 | error = _mqueue_recv(mq, &msg, -1); | |||
1869 | if (error == 0) | |||
1870 | goto received; | |||
1871 | ||||
1872 | if (error != EAGAIN) | |||
1873 | return (error); | |||
1874 | ||||
1875 | if (abs_timeout->tv_nsec >= 1000000000 || abs_timeout->tv_nsec < 0) { | |||
1876 | error = EINVAL; | |||
1877 | return (error); | |||
1878 | } | |||
1879 | ||||
1880 | for (;;) { | |||
1881 | ts2 = *abs_timeout; | |||
1882 | getnanotime(&ts); | |||
1883 | timespecsub(&ts2, &ts); | |||
1884 | if (ts2.tv_sec < 0 || (ts2.tv_sec == 0 && ts2.tv_nsec <= 0)) { | |||
1885 | error = ETIMEDOUT; | |||
1886 | return (error); | |||
1887 | } | |||
1888 | TIMESPEC_TO_TIMEVAL(&tv, &ts2); | |||
1889 | error = _mqueue_recv(mq, &msg, tvtohz(&tv)); | |||
1890 | if (error == 0) | |||
1891 | break; | |||
1892 | if (error != ETIMEDOUT) | |||
1893 | return (error); | |||
1894 | } | |||
1895 | ||||
1896 | received: | |||
1897 | error = mqueue_savemsg(msg, msg_ptr, msg_prio); | |||
1898 | if (error == 0) { | |||
1899 | curthread->td_retval[0] = msg->msg_size; | |||
1900 | curthread->td_retval[1] = 0; | |||
1901 | } | |||
1902 | mqueue_freemsg(msg); | |||
1903 | return (error); | |||
1904 | } | |||
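/*
 * Illustrative sketch (not part of the kernel source): the receive path
 * above requires the caller's buffer to be at least mq_msgsize bytes
 * (EMSGSIZE otherwise) and returns EAGAIN instead of sleeping when the
 * descriptor is non-blocking.  The queue name is an assumption.
 */
#if 0
#include <errno.h>
#include <fcntl.h>
#include <mqueue.h>
#include <stdio.h>
#include <stdlib.h>

int
main(void)
{
	struct mq_attr attr;
	unsigned prio;
	ssize_t n;
	char *buf;
	mqd_t mqd;

	mqd = mq_open("/myqueue", O_RDONLY | O_NONBLOCK);
	if (mqd == (mqd_t)-1) {
		perror("mq_open");
		return (1);
	}
	/* Ask the kernel how large the receive buffer has to be. */
	mq_getattr(mqd, &attr);
	buf = malloc(attr.mq_msgsize);
	n = mq_receive(mqd, buf, attr.mq_msgsize, &prio);
	if (n == -1 && errno == EAGAIN)
		printf("queue is empty\n");
	else if (n >= 0)
		printf("got %zd bytes at priority %u\n", n, prio);
	free(buf);
	mq_close(mqd);
	return (0);
}
#endif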
1905 | ||||
1906 | /* | |||
1907 | * Common routine to receive a message | |||
1908 | */ | |||
1909 | static int | |||
1910 | _mqueue_recv(struct mqueue *mq, struct mqueue_msg **msg, int timo) | |||
1911 | { | |||
1912 | int error = 0; | |||
1913 | ||||
1914 | mtx_lock(&mq->mq_mutex); | |||
1915 | while ((*msg = TAILQ_FIRST(&mq->mq_msgq)) == NULL && error == 0) { | |||
1916 | if (timo < 0) { | |||
1917 | mtx_unlock(&mq->mq_mutex); | |||
1918 | return (EAGAIN); | |||
1919 | } | |||
1920 | mq->mq_receivers++; | |||
1921 | error = msleep(&mq->mq_receivers, &mq->mq_mutex, | |||
1922 | PCATCH, "mqrecv", timo); | |||
1923 | mq->mq_receivers--; | |||
1924 | if (error == EAGAIN) | |||
1925 | error = ETIMEDOUT; | |||
1926 | } | |||
1927 | if (*msg != NULL) { | |||
1928 | error = 0; | |||
1929 | TAILQ_REMOVE(&mq->mq_msgq, *msg, msg_link); | |||
1930 | mq->mq_curmsgs--; | |||
1931 | mq->mq_totalbytes -= (*msg)->msg_size; | |||
1932 | if (mq->mq_senders) | |||
1933 | wakeup_one(&mq->mq_senders); | |||
1934 | if (mq->mq_flags & MQ_WSEL) { | |||
1935 | mq->mq_flags &= ~MQ_WSEL; | |||
1936 | selwakeup(&mq->mq_wsel); | |||
1937 | } | |||
1938 | KNOTE_LOCKED(&mq->mq_wsel.si_note, 0); | |||
1939 | } | |||
1940 | if (mq->mq_notifier != NULL && mq->mq_receivers == 0 && | |||
1941 | !TAILQ_EMPTY(&mq->mq_msgq)) { | |||
1942 | mqueue_send_notification(mq); | |||
1943 | } | |||
1944 | mtx_unlock(&mq->mq_mutex); | |||
1945 | return (error); | |||
1946 | } | |||
1947 | ||||
1948 | static __inline struct mqueue_notifier * | |||
1949 | notifier_alloc(void) | |||
1950 | { | |||
1951 | return (uma_zalloc(mqnoti_zone, M_WAITOK | M_ZERO)); | |||
1952 | } | |||
1953 | ||||
1954 | static __inline void | |||
1955 | notifier_free(struct mqueue_notifier *p) | |||
1956 | { | |||
1957 | uma_zfree(mqnoti_zone, p); | |||
1958 | } | |||
1959 | ||||
1960 | static struct mqueue_notifier * | |||
1961 | notifier_search(struct proc *p, int fd) | |||
1962 | { | |||
1963 | struct mqueue_notifier *nt; | |||
1964 | ||||
1965 | LIST_FOREACH(nt, &p->p_mqnotifier, nt_link) { | |||
1966 | if (nt->nt_ksi.ksi_mqd == fd) | |||
1967 | break; | |||
1968 | } | |||
1969 | return (nt); | |||
1970 | } | |||
1971 | ||||
1972 | static __inline void | |||
1973 | notifier_insert(struct proc *p, struct mqueue_notifier *nt) | |||
1974 | { | |||
1975 | LIST_INSERT_HEAD(&p->p_mqnotifier, nt, nt_link); | |||
1976 | } | |||
1977 | ||||
1978 | static __inline void | |||
1979 | notifier_delete(struct proc *p, struct mqueue_notifier *nt) | |||
1980 | { | |||
1981 | LIST_REMOVE(nt, nt_link); | |||
1982 | notifier_free(nt); | |||
1983 | } | |||
1984 | ||||
1985 | static void | |||
1986 | notifier_remove(struct proc *p, struct mqueue *mq, int fd) | |||
1987 | { | |||
1988 | struct mqueue_notifier *nt; | |||
1989 | ||||
1990 | mtx_assert(&mq->mq_mutex, MA_OWNED); | |||
1991 | PROC_LOCK(p); | |||
1992 | nt = notifier_search(p, fd); | |||
1993 | if (nt != NULL) { | |||
1994 | if (mq->mq_notifier == nt) | |||
1995 | mq->mq_notifier = NULL; | |||
1996 | sigqueue_take(&nt->nt_ksi); | |||
1997 | notifier_delete(p, nt); | |||
1998 | } | |||
1999 | PROC_UNLOCK(p); | |||
2000 | } | |||
2001 | ||||
2002 | static int | |||
2003 | kern_kmq_open(struct thread *td, const char *upath, int flags, mode_t mode, | |||
2004 | const struct mq_attr *attr) | |||
2005 | { | |||
2006 | char path[MQFS_NAMELEN255 + 1]; | |||
2007 | struct mqfs_node *pn; | |||
2008 | struct filedesc *fdp; | |||
2009 | struct file *fp; | |||
2010 | struct mqueue *mq; | |||
2011 | int fd, error, len, cmode; | |||
2012 | ||||
2013 | fdp = td->td_proc->p_fd; | |||
2014 | cmode = (((mode & ~fdp->fd_cmask) & ALLPERMS) & ~S_ISTXT); | |||
2015 | mq = NULL; | |||
2016 | if ((flags & O_CREAT) != 0 && attr != NULL) { | |||
2017 | if (attr->mq_maxmsg <= 0 || attr->mq_maxmsg > maxmsg) | |||
2018 | return (EINVAL22); | |||
2019 | if (attr->mq_msgsize <= 0 || attr->mq_msgsize > maxmsgsize) | |||
2020 | return (EINVAL22); | |||
2021 | } | |||
2022 | ||||
2023 | error = copyinstr(upath, path, MQFS_NAMELEN255 + 1, NULL((void *)0)); | |||
2024 | if (error) | |||
2025 | return (error); | |||
2026 | ||||
2027 | /* | |||
2028 | * The first character of name must be a slash (/) character | |||
2029 | * and the remaining characters of name cannot include any slash | |||
2030 | * characters. | |||
2031 | */ | |||
2032 | len = strlen(path); | |||
2033 | if (len < 2 || path[0] != '/' || strchr(path + 1, '/') != NULL((void *)0)) | |||
2034 | return (EINVAL22); | |||
2035 | ||||
2036 | error = falloc(td, &fp, &fd, O_CLOEXEC)falloc_caps(td, &fp, &fd, 0x00100000, ((void *)0)); | |||
2037 | if (error) | |||
2038 | return (error); | |||
2039 | ||||
2040 | sx_xlock(&mqfs_data.mi_lock)(void)_sx_xlock(((&mqfs_data.mi_lock)), 0, ("/usr/src/sys/modules/mqueue/../../kern/uipc_mqueue.c" ), (2040)); | |||
2041 | pn = mqfs_search(mqfs_data.mi_root, path + 1, len - 1, td->td_ucred); | |||
2042 | if (pn == NULL((void *)0)) { | |||
2043 | if (!(flags & O_CREAT0x0200)) { | |||
2044 | error = ENOENT2; | |||
2045 | } else { | |||
2046 | mq = mqueue_alloc(attr); | |||
2047 | if (mq == NULL((void *)0)) { | |||
2048 | error = ENFILE23; | |||
2049 | } else { | |||
2050 | pn = mqfs_create_file(mqfs_data.mi_root, | |||
2051 | path + 1, len - 1, td->td_ucred, | |||
2052 | cmode); | |||
2053 | if (pn == NULL((void *)0)) { | |||
2054 | error = ENOSPC28; | |||
2055 | mqueue_free(mq); | |||
2056 | } | |||
2057 | } | |||
2058 | } | |||
2059 | ||||
2060 | if (error == 0) { | |||
2061 | pn->mn_data = mq; | |||
2062 | } | |||
2063 | } else { | |||
2064 | if ((flags & (O_CREAT0x0200 | O_EXCL0x0800)) == (O_CREAT0x0200 | O_EXCL0x0800)) { | |||
2065 | error = EEXIST17; | |||
2066 | } else { | |||
2067 | accmode_t accmode = 0; | |||
2068 | ||||
2069 | if (flags & FREAD0x0001) | |||
2070 | accmode |= VREAD000000000400; | |||
2071 | if (flags & FWRITE0x0002) | |||
2072 | accmode |= VWRITE000000000200; | |||
2073 | error = vaccess(VREG, pn->mn_mode, pn->mn_uid, | |||
2074 | pn->mn_gid, accmode, td->td_ucred, NULL((void *)0)); | |||
2075 | } | |||
2076 | } | |||
2077 | ||||
2078 | if (error) { | |||
2079 | sx_xunlock(&mqfs_data.mi_lock); | |||
2080 | fdclose(td, fp, fd); | |||
2081 | fdrop(fp, td); | |||
2082 | return (error); | |||
2083 | } | |||
2084 | ||||
2085 | mqnode_addref(pn); | |||
2086 | sx_xunlock(&mqfs_data.mi_lock); | |||
2087 | ||||
2088 | finit(fp, flags & (FREAD | FWRITE | O_NONBLOCK), DTYPE_MQUEUE, pn, | |||
2089 | &mqueueops); | |||
2090 | ||||
2091 | td->td_retval[0] = fd; | |||
2092 | fdrop(fp, td); | |||
2093 | return (0); | |||
2094 | } | |||
2095 | ||||
2096 | /* | |||
2097 | * Syscall to open a message queue. | |||
2098 | */ | |||
2099 | int | |||
2100 | sys_kmq_open(struct thread *td, struct kmq_open_args *uap) | |||
2101 | { | |||
2102 | struct mq_attr attr; | |||
2103 | int flags, error; | |||
2104 | ||||
2105 | if ((uap->flags & O_ACCMODE) == O_ACCMODE || uap->flags & O_EXEC) | |||
2106 | return (EINVAL); | |||
2107 | flags = FFLAGS(uap->flags); | |||
2108 | if ((flags & O_CREAT) != 0 && uap->attr != NULL) { | |||
2109 | error = copyin(uap->attr, &attr, sizeof(attr)); | |||
2110 | if (error) | |||
2111 | return (error); | |||
2112 | } | |||
2113 | return (kern_kmq_open(td, uap->path, flags, uap->mode, | |||
2114 | uap->attr != NULL((void *)0) ? &attr : NULL((void *)0))); | |||
2115 | } | |||
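/*
 * Illustrative sketch (not part of the kernel source): the name rules
 * enforced by kern_kmq_open() as seen from userspace -- the path must start
 * with '/', contain no further '/', and fit within MQFS_NAMELEN.  The queue
 * names and attribute values below are assumptions for the example.
 */
#if 0
#include <fcntl.h>
#include <mqueue.h>
#include <stdio.h>

int
main(void)
{
	struct mq_attr attr = { .mq_maxmsg = 8, .mq_msgsize = 256 };
	mqd_t mqd;

	/* Valid: leading slash, no embedded slashes. */
	mqd = mq_open("/goodname", O_RDWR | O_CREAT | O_EXCL, 0600, &attr);
	if (mqd == (mqd_t)-1)
		perror("mq_open /goodname");
	else
		mq_close(mqd);

	/* Invalid: embedded slash, so the kernel returns EINVAL. */
	if (mq_open("/bad/name", O_RDWR | O_CREAT, 0600, &attr) == (mqd_t)-1)
		perror("mq_open /bad/name");
	return (0);
}
#endif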
2116 | ||||
2117 | /* | |||
2118 | * Syscall to unlink a message queue. | |||
2119 | */ | |||
2120 | int | |||
2121 | sys_kmq_unlink(struct thread *td, struct kmq_unlink_args *uap) | |||
2122 | { | |||
2123 | char path[MQFS_NAMELEN255+1]; | |||
2124 | struct mqfs_node *pn; | |||
2125 | int error, len; | |||
2126 | ||||
2127 | error = copyinstr(uap->path, path, MQFS_NAMELEN255 + 1, NULL((void *)0)); | |||
2128 | if (error) | |||
2129 | return (error); | |||
2130 | ||||
2131 | len = strlen(path); | |||
2132 | if (len < 2 || path[0] != '/' || strchr(path + 1, '/') != NULL((void *)0)) | |||
2133 | return (EINVAL22); | |||
2134 | ||||
2135 | sx_xlock(&mqfs_data.mi_lock)(void)_sx_xlock(((&mqfs_data.mi_lock)), 0, ("/usr/src/sys/modules/mqueue/../../kern/uipc_mqueue.c" ), (2135)); | |||
2136 | pn = mqfs_search(mqfs_data.mi_root, path + 1, len - 1, td->td_ucred); | |||
2137 | if (pn != NULL((void *)0)) | |||
2138 | error = do_unlink(pn, td->td_ucred); | |||
2139 | else | |||
2140 | error = ENOENT2; | |||
2141 | sx_xunlock(&mqfs_data.mi_lock)_sx_xunlock(((&mqfs_data.mi_lock)), ("/usr/src/sys/modules/mqueue/../../kern/uipc_mqueue.c" ), (2141)); | |||
2142 | return (error); | |||
2143 | } | |||
2144 | ||||
2145 | typedef int (*_fgetf)(struct thread *, int, cap_rights_t *, struct file **); | |||
2146 | ||||
2147 | /* | |||
2148 | * Get a message queue given a file descriptor. | |||
2149 | */ | |||
2150 | static int | |||
2151 | _getmq(struct thread *td, int fd, cap_rights_t *rightsp, _fgetf func, | |||
2152 | struct file **fpp, struct mqfs_node **ppn, struct mqueue **pmq) | |||
2153 | { | |||
2154 | struct mqfs_node *pn; | |||
2155 | int error; | |||
2156 | ||||
2157 | error = func(td, fd, rightsp, fpp); | |||
2158 | if (error) | |||
2159 | return (error); | |||
2160 | if (&mqueueops != (*fpp)->f_ops) { | |||
2161 | fdrop(*fpp, td); | |||
2162 | return (EBADF); | |||
2163 | } | |||
2164 | pn = (*fpp)->f_data; | |||
2165 | if (ppn) | |||
2166 | *ppn = pn; | |||
2167 | if (pmq) | |||
2168 | *pmq = pn->mn_data; | |||
2169 | return (0); | |||
2170 | } | |||
2171 | ||||
2172 | static __inline int | |||
2173 | getmq(struct thread *td, int fd, struct file **fpp, struct mqfs_node **ppn, | |||
2174 | struct mqueue **pmq) | |||
2175 | { | |||
2176 | cap_rights_t rights; | |||
2177 | ||||
2178 | return _getmq(td, fd, cap_rights_init(&rights, CAP_EVENT), fget, | |||
2179 | fpp, ppn, pmq); | |||
2180 | } | |||
2181 | ||||
2182 | static __inline int | |||
2183 | getmq_read(struct thread *td, int fd, struct file **fpp, | |||
2184 | struct mqfs_node **ppn, struct mqueue **pmq) | |||
2185 | { | |||
2186 | cap_rights_t rights; | |||
2187 | ||||
2188 | return _getmq(td, fd, cap_rights_init(&rights, CAP_READ), fget_read, | |||
2189 | fpp, ppn, pmq); | |||
2190 | } | |||
2191 | ||||
2192 | static __inline int | |||
2193 | getmq_write(struct thread *td, int fd, struct file **fpp, | |||
2194 | struct mqfs_node **ppn, struct mqueue **pmq) | |||
2195 | { | |||
2196 | cap_rights_t rights; | |||
2197 | ||||
2198 | return _getmq(td, fd, cap_rights_init(&rights, CAP_WRITE), fget_write, | |||
2199 | fpp, ppn, pmq); | |||
2200 | } | |||
2201 | ||||
2202 | static int | |||
2203 | kern_kmq_setattr(struct thread *td, int mqd, const struct mq_attr *attr, | |||
2204 | struct mq_attr *oattr) | |||
2205 | { | |||
2206 | struct mqueue *mq; | |||
2207 | struct file *fp; | |||
2208 | u_int oflag, flag; | |||
2209 | int error; | |||
2210 | ||||
2211 | if (attr != NULL && (attr->mq_flags & ~O_NONBLOCK) != 0) | |||
2212 | return (EINVAL); | |||
2213 | error = getmq(td, mqd, &fp, NULL, &mq); | |||
2214 | if (error) | |||
2215 | return (error); | |||
2216 | oattr->mq_maxmsg = mq->mq_maxmsg; | |||
2217 | oattr->mq_msgsize = mq->mq_msgsize; | |||
2218 | oattr->mq_curmsgs = mq->mq_curmsgs; | |||
2219 | if (attr != NULL) { | |||
2220 | do { | |||
2221 | oflag = flag = fp->f_flag; | |||
2222 | flag &= ~O_NONBLOCK; | |||
2223 | flag |= (attr->mq_flags & O_NONBLOCK); | |||
2224 | } while (atomic_cmpset_int(&fp->f_flag, oflag, flag) == 0); | |||
2225 | } else | |||
2226 | oflag = fp->f_flag; | |||
2227 | oattr->mq_flags = (O_NONBLOCK & oflag); | |||
2228 | fdrop(fp, td); | |||
2229 | return (error); | |||
2230 | } | |||
2231 | ||||
2232 | int | |||
2233 | sys_kmq_setattr(struct thread *td, struct kmq_setattr_args *uap) | |||
2234 | { | |||
2235 | struct mq_attr attr, oattr; | |||
2236 | int error; | |||
2237 | ||||
2238 | if (uap->attr != NULL((void *)0)) { | |||
2239 | error = copyin(uap->attr, &attr, sizeof(attr)); | |||
2240 | if (error != 0) | |||
2241 | return (error); | |||
2242 | } | |||
2243 | error = kern_kmq_setattr(td, uap->mqd, uap->attr != NULL((void *)0) ? &attr : NULL((void *)0), | |||
2244 | &oattr); | |||
2245 | if (error != 0) | |||
2246 | return (error); | |||
2247 | if (uap->oattr != NULL((void *)0)) | |||
2248 | error = copyout(&oattr, uap->oattr, sizeof(oattr)); | |||
2249 | return (error); | |||
2250 | } | |||
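/*
 * Illustrative sketch (not part of the kernel source): only the O_NONBLOCK
 * bit of mq_flags is honored by kern_kmq_setattr(); the size fields in the
 * returned attributes are read-only snapshots.  The queue name is an
 * assumption.
 */
#if 0
#include <fcntl.h>
#include <mqueue.h>
#include <stdio.h>

int
main(void)
{
	struct mq_attr newattr = { .mq_flags = O_NONBLOCK }, oldattr;
	mqd_t mqd;

	mqd = mq_open("/myqueue", O_RDWR | O_CREAT, 0600, NULL);
	if (mqd == (mqd_t)-1) {
		perror("mq_open");
		return (1);
	}
	/* Switch the descriptor to non-blocking; fetch the previous state. */
	if (mq_setattr(mqd, &newattr, &oldattr) == 0)
		printf("maxmsg=%ld msgsize=%ld was%s non-blocking\n",
		    oldattr.mq_maxmsg, oldattr.mq_msgsize,
		    (oldattr.mq_flags & O_NONBLOCK) ? "" : " not");
	mq_close(mqd);
	return (0);
}
#endif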
2251 | ||||
2252 | int | |||
2253 | sys_kmq_timedreceive(struct thread *td, struct kmq_timedreceive_args *uap) | |||
2254 | { | |||
2255 | struct mqueue *mq; | |||
2256 | struct file *fp; | |||
2257 | struct timespec *abs_timeout, ets; | |||
2258 | int error; | |||
2259 | int waitok; | |||
2260 | ||||
2261 | error = getmq_read(td, uap->mqd, &fp, NULL, &mq); | |||
2262 | if (error) | |||
2263 | return (error); | |||
2264 | if (uap->abs_timeout != NULL) { | |||
2265 | error = copyin(uap->abs_timeout, &ets, sizeof(ets)); | |||
2266 | if (error != 0) | |||
2267 | return (error); | |||
2268 | abs_timeout = &ets; | |||
2269 | } else | |||
2270 | abs_timeout = NULL; | |||
2271 | waitok = !(fp->f_flag & O_NONBLOCK); | |||
2272 | error = mqueue_receive(mq, uap->msg_ptr, uap->msg_len, | |||
2273 | uap->msg_prio, waitok, abs_timeout); | |||
2274 | fdrop(fp, td); | |||
2275 | return (error); | |||
2276 | } | |||
2277 | ||||
2278 | int | |||
2279 | sys_kmq_timedsend(struct thread *td, struct kmq_timedsend_args *uap) | |||
2280 | { | |||
2281 | struct mqueue *mq; | |||
2282 | struct file *fp; | |||
2283 | struct timespec *abs_timeout, ets; | |||
2284 | int error, waitok; | |||
2285 | ||||
2286 | error = getmq_write(td, uap->mqd, &fp, NULL((void *)0), &mq); | |||
2287 | if (error) | |||
2288 | return (error); | |||
2289 | if (uap->abs_timeout != NULL((void *)0)) { | |||
2290 | error = copyin(uap->abs_timeout, &ets, sizeof(ets)); | |||
2291 | if (error != 0) | |||
2292 | return (error); | |||
2293 | abs_timeout = &ets; | |||
2294 | } else | |||
2295 | abs_timeout = NULL((void *)0); | |||
2296 | waitok = !(fp->f_flag & O_NONBLOCK0x0004); | |||
2297 | error = mqueue_send(mq, uap->msg_ptr, uap->msg_len, | |||
2298 | uap->msg_prio, waitok, abs_timeout); | |||
2299 | fdrop(fp, td)(refcount_release(&(fp)->f_count) ? _fdrop((fp), (td)) : _fnoop()); | |||
2300 | return (error); | |||
2301 | } | |||
2302 | ||||
2303 | static int | |||
2304 | kern_kmq_notify(struct thread *td, int mqd, struct sigevent *sigev) | |||
2305 | { | |||
2306 | #ifdef CAPABILITIES1 | |||
2307 | cap_rights_t rights; | |||
2308 | #endif | |||
2309 | struct filedesc *fdp; | |||
2310 | struct proc *p; | |||
2311 | struct mqueue *mq; | |||
2312 | struct file *fp, *fp2; | |||
2313 | struct mqueue_notifier *nt, *newnt = NULL((void *)0); | |||
2314 | int error; | |||
2315 | ||||
2316 | if (sigev != NULL((void *)0)) { | |||
2317 | if (sigev->sigev_notify != SIGEV_SIGNAL1 && | |||
2318 | sigev->sigev_notify != SIGEV_THREAD_ID4 && | |||
2319 | sigev->sigev_notify != SIGEV_NONE0) | |||
2320 | return (EINVAL22); | |||
2321 | if ((sigev->sigev_notify == SIGEV_SIGNAL1 || | |||
2322 | sigev->sigev_notify == SIGEV_THREAD_ID4) && | |||
2323 | !_SIG_VALID(sigev->sigev_signo)((sigev->sigev_signo) <= 128 && (sigev->sigev_signo ) > 0)) | |||
2324 | return (EINVAL22); | |||
2325 | } | |||
2326 | p = td->td_proc; | |||
2327 | fdp = td->td_proc->p_fd; | |||
2328 | error = getmq(td, mqd, &fp, NULL((void *)0), &mq); | |||
2329 | if (error) | |||
2330 | return (error); | |||
2331 | again: | |||
2332 | FILEDESC_SLOCK(fdp)(void)_sx_slock(((&(fdp)->fd_sx)), 0, ("/usr/src/sys/modules/mqueue/../../kern/uipc_mqueue.c" ), (2332)); | |||
2333 | fp2 = fget_locked(fdp, mqd); | |||
2334 | if (fp2 == NULL((void *)0)) { | |||
2335 | FILEDESC_SUNLOCK(fdp)_sx_sunlock(((&(fdp)->fd_sx)), ("/usr/src/sys/modules/mqueue/../../kern/uipc_mqueue.c" ), (2335)); | |||
2336 | error = EBADF9; | |||
2337 | goto out; | |||
2338 | } | |||
2339 | #ifdef CAPABILITIES1 | |||
2340 | error = cap_check(cap_rights(fdp, mqd), | |||
2341 | cap_rights_init(&rights, CAP_EVENT)__cap_rights_init(0, &rights, ((1ULL << (57 + (1))) | (0x0000000000000020ULL)), 0ULL)); | |||
2342 | if (error) { | |||
2343 | FILEDESC_SUNLOCK(fdp)_sx_sunlock(((&(fdp)->fd_sx)), ("/usr/src/sys/modules/mqueue/../../kern/uipc_mqueue.c" ), (2343)); | |||
2344 | goto out; | |||
2345 | } | |||
2346 | #endif | |||
2347 | if (fp2 != fp) { | |||
2348 | FILEDESC_SUNLOCK(fdp)_sx_sunlock(((&(fdp)->fd_sx)), ("/usr/src/sys/modules/mqueue/../../kern/uipc_mqueue.c" ), (2348)); | |||
2349 | error = EBADF9; | |||
2350 | goto out; | |||
2351 | } | |||
2352 | mtx_lock(&mq->mq_mutex)__mtx_lock_flags(&((((&mq->mq_mutex))))->mtx_lock , ((0)), ("/usr/src/sys/modules/mqueue/../../kern/uipc_mqueue.c" ), (2352)); | |||
2353 | FILEDESC_SUNLOCK(fdp)_sx_sunlock(((&(fdp)->fd_sx)), ("/usr/src/sys/modules/mqueue/../../kern/uipc_mqueue.c" ), (2353)); | |||
2354 | if (sigev != NULL((void *)0)) { | |||
2355 | if (mq->mq_notifier != NULL((void *)0)) { | |||
2356 | error = EBUSY16; | |||
2357 | } else { | |||
2358 | PROC_LOCK(p)__mtx_lock_flags(&((((&(p)->p_mtx))))->mtx_lock , ((0)), ("/usr/src/sys/modules/mqueue/../../kern/uipc_mqueue.c" ), (2358)); | |||
2359 | nt = notifier_search(p, mqd); | |||
2360 | if (nt == NULL((void *)0)) { | |||
2361 | if (newnt == NULL((void *)0)) { | |||
2362 | PROC_UNLOCK(p)__mtx_unlock_flags(&((((&(p)->p_mtx))))->mtx_lock , ((0)), ("/usr/src/sys/modules/mqueue/../../kern/uipc_mqueue.c" ), (2362)); | |||
2363 | mtx_unlock(&mq->mq_mutex)__mtx_unlock_flags(&((((&mq->mq_mutex))))->mtx_lock , ((0)), ("/usr/src/sys/modules/mqueue/../../kern/uipc_mqueue.c" ), (2363)); | |||
2364 | newnt = notifier_alloc(); | |||
2365 | goto again; | |||
2366 | } | |||
2367 | } | |||
2368 | ||||
2369 | if (nt != NULL((void *)0)) { | |||
2370 | sigqueue_take(&nt->nt_ksi); | |||
2371 | if (newnt != NULL((void *)0)) { | |||
2372 | notifier_free(newnt); | |||
2373 | newnt = NULL((void *)0); | |||
2374 | } | |||
2375 | } else { | |||
2376 | nt = newnt; | |||
2377 | newnt = NULL((void *)0); | |||
2378 | ksiginfo_init(&nt->nt_ksi)do { bzero(&nt->nt_ksi, sizeof(ksiginfo_t)); } while(0 ); | |||
2379 | nt->nt_ksi.ksi_flags |= KSI_INS0x04 | KSI_EXT0x02; | |||
2380 | nt->nt_ksi.ksi_codeksi_info.si_code = SI_MESGQ0x10005; | |||
2381 | nt->nt_proc = p; | |||
2382 | nt->nt_ksi.ksi_mqdksi_info._reason._mesgq._mqd = mqd; | |||
2383 | notifier_insert(p, nt); | |||
2384 | } | |||
2385 | nt->nt_sigev = *sigev; | |||
2386 | mq->mq_notifier = nt; | |||
2387 | PROC_UNLOCK(p)__mtx_unlock_flags(&((((&(p)->p_mtx))))->mtx_lock , ((0)), ("/usr/src/sys/modules/mqueue/../../kern/uipc_mqueue.c" ), (2387)); | |||
2388 | /* | |||
2389 | * If there are no receivers and the message queue | |||
2390 | * is not empty, send the notification as soon | |||
2391 | * as possible. | |||
2392 | */ | |||
2393 | if (mq->mq_receivers == 0 && | |||
2394 | !TAILQ_EMPTY(&mq->mq_msgq)((&mq->mq_msgq)->tqh_first == ((void *)0))) | |||
2395 | mqueue_send_notification(mq); | |||
2396 | } | |||
2397 | } else { | |||
2398 | notifier_remove(p, mq, mqd); | |||
2399 | } | |||
2400 | mtx_unlock(&mq->mq_mutex)__mtx_unlock_flags(&((((&mq->mq_mutex))))->mtx_lock , ((0)), ("/usr/src/sys/modules/mqueue/../../kern/uipc_mqueue.c" ), (2400)); | |||
2401 | ||||
2402 | out: | |||
2403 | fdrop(fp, td)(refcount_release(&(fp)->f_count) ? _fdrop((fp), (td)) : _fnoop()); | |||
2404 | if (newnt != NULL((void *)0)) | |||
2405 | notifier_free(newnt); | |||
2406 | return (error); | |||
2407 | } | |||
2408 | ||||
2409 | int | |||
2410 | sys_kmq_notify(struct thread *td, struct kmq_notify_args *uap) | |||
2411 | { | |||
2412 | struct sigevent ev, *evp; | |||
2413 | int error; | |||
2414 | ||||
2415 | if (uap->sigev == NULL((void *)0)) { | |||
2416 | evp = NULL((void *)0); | |||
2417 | } else { | |||
2418 | error = copyin(uap->sigev, &ev, sizeof(ev)); | |||
2419 | if (error != 0) | |||
2420 | return (error); | |||
2421 | evp = &ev; | |||
2422 | } | |||
2423 | return (kern_kmq_notify(td, uap->mqd, evp)); | |||
2424 | } | |||
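/*
 * Illustrative sketch (not part of the kernel source): registering for
 * notification with SIGEV_SIGNAL.  As implemented above, the kernel delivers
 * the signal once, when a message is queued while no receiver is waiting,
 * and then clears the registration.  The queue name and signal choice are
 * assumptions.
 */
#if 0
#include <fcntl.h>
#include <mqueue.h>
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static volatile sig_atomic_t got_note;

static void
on_note(int sig)
{
	got_note = 1;
}

int
main(void)
{
	struct sigevent sev = { 0 };
	mqd_t mqd;

	signal(SIGUSR1, on_note);
	mqd = mq_open("/myqueue", O_RDONLY | O_CREAT, 0600, NULL);
	if (mqd == (mqd_t)-1) {
		perror("mq_open");
		return (1);
	}
	sev.sigev_notify = SIGEV_SIGNAL;
	sev.sigev_signo = SIGUSR1;
	/* Fails with EBUSY if another process is already registered. */
	if (mq_notify(mqd, &sev) == -1)
		perror("mq_notify");
	pause();	/* wait for a sender; re-register before the next one */
	printf("notified: %d\n", (int)got_note);
	mq_close(mqd);
	return (0);
}
#endif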
2425 | ||||
2426 | static void | |||
2427 | mqueue_fdclose(struct thread *td, int fd, struct file *fp) | |||
2428 | { | |||
2429 | struct filedesc *fdp; | |||
2430 | struct mqueue *mq; | |||
2431 | ||||
2432 | fdp = td->td_proc->p_fd; | |||
2433 | FILEDESC_LOCK_ASSERT(fdp)(void)0; | |||
2434 | ||||
2435 | if (fp->f_ops == &mqueueops) { | |||
2436 | mq = FPTOMQ(fp); | |||
2437 | mtx_lock(&mq->mq_mutex); | |||
2438 | notifier_remove(td->td_proc, mq, fd); | |||
2439 | ||||
2440 | /* have to wake up threads in the same process */ | |||
2441 | if (mq->mq_flags & MQ_RSEL) { | |||
2442 | mq->mq_flags &= ~MQ_RSEL; | |||
2443 | selwakeup(&mq->mq_rsel); | |||
2444 | } | |||
2445 | if (mq->mq_flags & MQ_WSEL) { | |||
2446 | mq->mq_flags &= ~MQ_WSEL; | |||
2447 | selwakeup(&mq->mq_wsel); | |||
2448 | } | |||
2449 | mtx_unlock(&mq->mq_mutex); | |||
2450 | } | |||
2451 | } | |||
2452 | ||||
2453 | static void | |||
2454 | mq_proc_exit(void *arg __unused, struct proc *p) | |||
2455 | { | |||
2456 | struct filedesc *fdp; | |||
2457 | struct file *fp; | |||
2458 | struct mqueue *mq; | |||
2459 | int i; | |||
2460 | ||||
2461 | fdp = p->p_fd; | |||
2462 | FILEDESC_SLOCK(fdp)(void)_sx_slock(((&(fdp)->fd_sx)), 0, ("/usr/src/sys/modules/mqueue/../../kern/uipc_mqueue.c" ), (2462)); | |||
2463 | for (i = 0; i < fdp->fd_nfilesfd_files->fdt_nfiles; ++i) { | |||
2464 | fp = fget_locked(fdp, i); | |||
2465 | if (fp != NULL((void *)0) && fp->f_ops == &mqueueops) { | |||
2466 | mq = FPTOMQ(fp)((struct mqueue *)(((struct mqfs_node *) (fp)->f_data)-> mn_data)); | |||
2467 | mtx_lock(&mq->mq_mutex)__mtx_lock_flags(&((((&mq->mq_mutex))))->mtx_lock , ((0)), ("/usr/src/sys/modules/mqueue/../../kern/uipc_mqueue.c" ), (2467)); | |||
2468 | notifier_remove(p, FPTOMQ(fp)((struct mqueue *)(((struct mqfs_node *) (fp)->f_data)-> mn_data)), i); | |||
2469 | mtx_unlock(&mq->mq_mutex)__mtx_unlock_flags(&((((&mq->mq_mutex))))->mtx_lock , ((0)), ("/usr/src/sys/modules/mqueue/../../kern/uipc_mqueue.c" ), (2469)); | |||
2470 | } | |||
2471 | } | |||
2472 | FILEDESC_SUNLOCK(fdp)_sx_sunlock(((&(fdp)->fd_sx)), ("/usr/src/sys/modules/mqueue/../../kern/uipc_mqueue.c" ), (2472)); | |||
2473 | KASSERT(LIST_EMPTY(&p->p_mqnotifier), ("mq notifiers left"))do { } while (0); | |||
2474 | } | |||
2475 | ||||
2476 | static int | |||
2477 | mqf_poll(struct file *fp, int events, struct ucred *active_cred, | |||
2478 | struct thread *td) | |||
2479 | { | |||
2480 | struct mqueue *mq = FPTOMQ(fp); | |||
2481 | int revents = 0; | |||
2482 | ||||
2483 | mtx_lock(&mq->mq_mutex); | |||
2484 | if (events & (POLLIN | POLLRDNORM)) { | |||
2485 | if (mq->mq_curmsgs) { | |||
2486 | revents |= events & (POLLIN | POLLRDNORM); | |||
2487 | } else { | |||
2488 | mq->mq_flags |= MQ_RSEL; | |||
2489 | selrecord(td, &mq->mq_rsel); | |||
2490 | } | |||
2491 | } | |||
2492 | if (events & POLLOUT) { | |||
2493 | if (mq->mq_curmsgs < mq->mq_maxmsg) | |||
2494 | revents |= POLLOUT; | |||
2495 | else { | |||
2496 | mq->mq_flags |= MQ_WSEL; | |||
2497 | selrecord(td, &mq->mq_wsel); | |||
2498 | } | |||
2499 | } | |||
2500 | mtx_unlock(&mq->mq_mutex); | |||
2501 | return (revents); | |||
2502 | } | |||
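| /* | |||
|  * A minimal userland sketch of the poll(2) semantics implemented by | |||
|  * mqf_poll() above.  It assumes mq_getfd_np(3) is available to recover | |||
|  * the descriptor behind an mqd_t; the queue name "/q" is illustrative. | |||
|  * | |||
|  *	#include <fcntl.h> | |||
|  *	#include <mqueue.h> | |||
|  *	#include <poll.h> | |||
|  * | |||
|  *	mqd_t mqd = mq_open("/q", O_RDWR | O_CREAT, 0600, NULL); | |||
|  *	struct pollfd pfd = { .fd = mq_getfd_np(mqd), .events = POLLIN | POLLOUT }; | |||
|  *	int n = poll(&pfd, 1, -1); | |||
|  * | |||
|  * POLLIN is reported while mq_curmsgs > 0 and POLLOUT while | |||
|  * mq_curmsgs < mq_maxmsg, matching the checks above. | |||
|  */ | |||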
2503 | ||||
2504 | static int | |||
2505 | mqf_close(struct file *fp, struct thread *td) | |||
2506 | { | |||
2507 | struct mqfs_node *pn; | |||
2508 | ||||
2509 | fp->f_ops = &badfileops; | |||
2510 | pn = fp->f_data; | |||
2511 | fp->f_data = NULL; | |||
2512 | sx_xlock(&mqfs_data.mi_lock); | |||
2513 | mqnode_release(pn); | |||
2514 | sx_xunlock(&mqfs_data.mi_lock); | |||
2515 | return (0); | |||
2516 | } | |||
2517 | ||||
2518 | static int | |||
2519 | mqf_stat(struct file *fp, struct stat *st, struct ucred *active_cred, | |||
2520 | struct thread *td) | |||
2521 | { | |||
2522 | struct mqfs_node *pn = fp->f_data; | |||
2523 | ||||
2524 | bzero(st, sizeof *st); | |||
2525 | sx_xlock(&mqfs_data.mi_lock); | |||
2526 | st->st_atim = pn->mn_atime; | |||
2527 | st->st_mtim = pn->mn_mtime; | |||
2528 | st->st_ctim = pn->mn_ctime; | |||
2529 | st->st_birthtim = pn->mn_birth; | |||
2530 | st->st_uid = pn->mn_uid; | |||
2531 | st->st_gid = pn->mn_gid; | |||
2532 | st->st_mode = S_IFIFO | pn->mn_mode; | |||
2533 | sx_xunlock(&mqfs_data.mi_lock); | |||
2534 | return (0); | |||
2535 | } | |||
2536 | ||||
2537 | static int | |||
2538 | mqf_chmod(struct file *fp, mode_t mode, struct ucred *active_cred, | |||
2539 | struct thread *td) | |||
2540 | { | |||
2541 | struct mqfs_node *pn; | |||
2542 | int error; | |||
2543 | ||||
2544 | error = 0; | |||
2545 | pn = fp->f_data; | |||
2546 | sx_xlock(&mqfs_data.mi_lock); | |||
2547 | error = vaccess(VREG, pn->mn_mode, pn->mn_uid, pn->mn_gid, VADMIN, | |||
2548 | active_cred, NULL); | |||
2549 | if (error != 0) | |||
2550 | goto out; | |||
2551 | pn->mn_mode = mode & ACCESSPERMS; | |||
2552 | out: | |||
2553 | sx_xunlock(&mqfs_data.mi_lock); | |||
2554 | return (error); | |||
2555 | } | |||
2556 | ||||
2557 | static int | |||
2558 | mqf_chown(struct file *fp, uid_t uid, gid_t gid, struct ucred *active_cred, | |||
2559 | struct thread *td) | |||
2560 | { | |||
2561 | struct mqfs_node *pn; | |||
2562 | int error; | |||
2563 | ||||
2564 | error = 0; | |||
2565 | pn = fp->f_data; | |||
2566 | sx_xlock(&mqfs_data.mi_lock); | |||
2567 | if (uid == (uid_t)-1) | |||
2568 | uid = pn->mn_uid; | |||
2569 | if (gid == (gid_t)-1) | |||
2570 | gid = pn->mn_gid; | |||
2571 | if (((uid != pn->mn_uid && uid != active_cred->cr_uid) || | |||
2572 | (gid != pn->mn_gid && !groupmember(gid, active_cred))) && | |||
2573 | (error = priv_check_cred(active_cred, PRIV_VFS_CHOWN, 0))) | |||
2574 | goto out; | |||
2575 | pn->mn_uid = uid; | |||
2576 | pn->mn_gid = gid; | |||
2577 | out: | |||
2578 | sx_xunlock(&mqfs_data.mi_lock); | |||
2579 | return (error); | |||
2580 | } | |||
2581 | ||||
2582 | static int | |||
2583 | mqf_kqfilter(struct file *fp, struct knote *kn) | |||
2584 | { | |||
2585 | struct mqueue *mq = FPTOMQ(fp); | |||
2586 | int error = 0; | |||
2587 | ||||
2588 | if (kn->kn_filter == EVFILT_READ) { | |||
2589 | kn->kn_fop = &mq_rfiltops; | |||
2590 | knlist_add(&mq->mq_rsel.si_note, kn, 0); | |||
2591 | } else if (kn->kn_filter == EVFILT_WRITE) { | |||
2592 | kn->kn_fop = &mq_wfiltops; | |||
2593 | knlist_add(&mq->mq_wsel.si_note, kn, 0); | |||
2594 | } else | |||
2595 | error = EINVAL; | |||
2596 | return (error); | |||
2597 | } | |||
2598 | ||||
2599 | static void | |||
2600 | filt_mqdetach(struct knote *kn) | |||
2601 | { | |||
2602 | struct mqueue *mq = FPTOMQ(kn->kn_fp); | |||
2603 | ||||
2604 | if (kn->kn_filter == EVFILT_READ) | |||
2605 | knlist_remove(&mq->mq_rsel.si_note, kn, 0); | |||
2606 | else if (kn->kn_filter == EVFILT_WRITE) | |||
2607 | knlist_remove(&mq->mq_wsel.si_note, kn, 0); | |||
2608 | else | |||
2609 | panic("filt_mqdetach"); | |||
2610 | } | |||
2611 | ||||
2612 | static int | |||
2613 | filt_mqread(struct knote *kn, long hint) | |||
2614 | { | |||
2615 | struct mqueue *mq = FPTOMQ(kn->kn_fp); | |||
2616 | ||||
2617 | mtx_assert(&mq->mq_mutex, MA_OWNED); | |||
2618 | return (mq->mq_curmsgs != 0); | |||
2619 | } | |||
2620 | ||||
2621 | static int | |||
2622 | filt_mqwrite(struct knote *kn, long hint) | |||
2623 | { | |||
2624 | struct mqueue *mq = FPTOMQ(kn->kn_fp); | |||
2625 | ||||
2626 | mtx_assert(&mq->mq_mutex, MA_OWNED); | |||
2627 | return (mq->mq_curmsgs < mq->mq_maxmsg); | |||
2628 | } | |||
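| /* | |||
|  * A minimal userland sketch of the kqueue filters above (filt_mqread and | |||
|  * filt_mqwrite), again assuming mq_getfd_np(3) is available; error | |||
|  * handling is omitted for brevity. | |||
|  * | |||
|  *	#include <sys/types.h> | |||
|  *	#include <sys/event.h> | |||
|  *	#include <mqueue.h> | |||
|  * | |||
|  *	int kq = kqueue(); | |||
|  *	struct kevent kev; | |||
|  *	EV_SET(&kev, mq_getfd_np(mqd), EVFILT_READ, EV_ADD, 0, 0, NULL); | |||
|  *	(void)kevent(kq, &kev, 1, NULL, 0, NULL); | |||
|  *	(void)kevent(kq, NULL, 0, &kev, 1, NULL); | |||
|  * | |||
|  * The first kevent() call registers interest; the second blocks until | |||
|  * the queue holds at least one message (filt_mqread returns non-zero). | |||
|  */ | |||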
2629 | ||||
2630 | static int | |||
2631 | mqf_fill_kinfo(struct file *fp, struct kinfo_file *kif, struct filedesc *fdp) | |||
2632 | { | |||
2633 | ||||
2634 | kif->kf_type = KF_TYPE_MQUEUE; | |||
2635 | return (0); | |||
2636 | } | |||
2637 | ||||
2638 | static struct fileops mqueueops = { | |||
2639 | .fo_read = invfo_rdwr, | |||
2640 | .fo_write = invfo_rdwr, | |||
2641 | .fo_truncate = invfo_truncate, | |||
2642 | .fo_ioctl = invfo_ioctl, | |||
2643 | .fo_poll = mqf_poll, | |||
2644 | .fo_kqfilter = mqf_kqfilter, | |||
2645 | .fo_stat = mqf_stat, | |||
2646 | .fo_close = mqf_close, | |||
2647 | .fo_chmod = mqf_chmod, | |||
2648 | .fo_chown = mqf_chown, | |||
2649 | .fo_sendfile = invfo_sendfile, | |||
2650 | .fo_fill_kinfo = mqf_fill_kinfo, | |||
2651 | }; | |||
2652 | ||||
2653 | static struct vop_vector mqfs_vnodeops = { | |||
2654 | .vop_default = &default_vnodeops, | |||
2655 | .vop_access = mqfs_access, | |||
2656 | .vop_cachedlookup = mqfs_lookup, | |||
2657 | .vop_lookup = vfs_cache_lookup, | |||
2658 | .vop_reclaim = mqfs_reclaim, | |||
2659 | .vop_create = mqfs_create, | |||
2660 | .vop_remove = mqfs_remove, | |||
2661 | .vop_inactive = mqfs_inactive, | |||
2662 | .vop_open = mqfs_open, | |||
2663 | .vop_close = mqfs_close, | |||
2664 | .vop_getattr = mqfs_getattr, | |||
2665 | .vop_setattr = mqfs_setattr, | |||
2666 | .vop_read = mqfs_read, | |||
2667 | .vop_write = VOP_EOPNOTSUPP, | |||
2668 | .vop_readdir = mqfs_readdir, | |||
2669 | .vop_mkdir = VOP_EOPNOTSUPP, | |||
2670 | .vop_rmdir = VOP_EOPNOTSUPP | |||
2671 | }; | |||
2672 | ||||
2673 | static struct vfsops mqfs_vfsops = { | |||
2674 | .vfs_init = mqfs_init, | |||
2675 | .vfs_uninit = mqfs_uninit, | |||
2676 | .vfs_mount = mqfs_mount, | |||
2677 | .vfs_unmount = mqfs_unmount, | |||
2678 | .vfs_root = mqfs_root, | |||
2679 | .vfs_statfs = mqfs_statfs, | |||
2680 | }; | |||
2681 | ||||
2682 | static struct vfsconf mqueuefs_vfsconf = { | |||
2683 | .vfc_version = VFS_VERSION, | |||
2684 | .vfc_name = "mqueuefs", | |||
2685 | .vfc_vfsops = &mqfs_vfsops, | |||
2686 | .vfc_typenum = -1, | |||
2687 | .vfc_flags = VFCF_SYNTHETIC | |||
2688 | }; | |||
2689 | ||||
2690 | static struct syscall_helper_data mq_syscalls[] = { | |||
2691 | SYSCALL_INIT_HELPER(kmq_open), | |||
2692 | SYSCALL_INIT_HELPER(kmq_setattr), | |||
2693 | SYSCALL_INIT_HELPER(kmq_timedsend), | |||
2694 | SYSCALL_INIT_HELPER(kmq_timedreceive), | |||
2695 | SYSCALL_INIT_HELPER(kmq_notify), | |||
2696 | SYSCALL_INIT_HELPER(kmq_unlink), | |||
2697 | SYSCALL_INIT_LAST | |||
2698 | }; | |||
2699 | ||||
2700 | #ifdef COMPAT_FREEBSD32 | |||
2701 | #include <compat/freebsd32/freebsd32.h> | |||
2702 | #include <compat/freebsd32/freebsd32_proto.h> | |||
2703 | #include <compat/freebsd32/freebsd32_signal.h> | |||
2704 | #include <compat/freebsd32/freebsd32_syscall.h> | |||
2705 | #include <compat/freebsd32/freebsd32_util.h> | |||
2706 | ||||
2707 | static void | |||
2708 | mq_attr_from32(const struct mq_attr32 *from, struct mq_attr *to) | |||
2709 | { | |||
2710 | ||||
2711 | to->mq_flags = from->mq_flags; | |||
2712 | to->mq_maxmsg = from->mq_maxmsg; | |||
2713 | to->mq_msgsize = from->mq_msgsize; | |||
2714 | to->mq_curmsgs = from->mq_curmsgs; | |||
2715 | } | |||
2716 | ||||
2717 | static void | |||
2718 | mq_attr_to32(const struct mq_attr *from, struct mq_attr32 *to) | |||
2719 | { | |||
2720 | ||||
2721 | to->mq_flags = from->mq_flags; | |||
2722 | to->mq_maxmsg = from->mq_maxmsg; | |||
2723 | to->mq_msgsize = from->mq_msgsize; | |||
2724 | to->mq_curmsgs = from->mq_curmsgs; | |||
2725 | } | |||
2726 | ||||
2727 | int | |||
2728 | freebsd32_kmq_open(struct thread *td, struct freebsd32_kmq_open_args *uap) | |||
2729 | { | |||
2730 | struct mq_attr attr; | |||
2731 | struct mq_attr32 attr32; | |||
2732 | int flags, error; | |||
2733 | ||||
2734 | if ((uap->flags & O_ACCMODE) == O_ACCMODE || uap->flags & O_EXEC) | |||
2735 | return (EINVAL); | |||
2736 | flags = FFLAGS(uap->flags); | |||
2737 | if ((flags & O_CREAT) != 0 && uap->attr != NULL) { | |||
2738 | error = copyin(uap->attr, &attr32, sizeof(attr32)); | |||
2739 | if (error) | |||
2740 | return (error); | |||
2741 | mq_attr_from32(&attr32, &attr); | |||
2742 | } | |||
2743 | return (kern_kmq_open(td, uap->path, flags, uap->mode, | |||
2744 | uap->attr != NULL ? &attr : NULL)); | |||
2745 | } | |||
2746 | ||||
2747 | int | |||
2748 | freebsd32_kmq_setattr(struct thread *td, struct freebsd32_kmq_setattr_args *uap) | |||
2749 | { | |||
2750 | struct mq_attr attr, oattr; | |||
2751 | struct mq_attr32 attr32, oattr32; | |||
2752 | int error; | |||
2753 | ||||
2754 | if (uap->attr != NULL) { | |||
2755 | error = copyin(uap->attr, &attr32, sizeof(attr32)); | |||
2756 | if (error != 0) | |||
2757 | return (error); | |||
2758 | mq_attr_from32(&attr32, &attr); | |||
2759 | } | |||
2760 | error = kern_kmq_setattr(td, uap->mqd, uap->attr != NULL ? &attr : NULL, | |||
2761 | &oattr); | |||
2762 | if (error != 0) | |||
2763 | return (error); | |||
2764 | if (uap->oattr != NULL) { | |||
2765 | mq_attr_to32(&oattr, &oattr32); | |||
2766 | error = copyout(&oattr32, uap->oattr, sizeof(oattr32)); | |||
2767 | } | |||
2768 | return (error); | |||
2769 | } | |||
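| /* | |||
|  * mq_attr_to32() fills only the four attribute fields, so the trailing | |||
|  * __reserved bytes of the on-stack oattr32 above are copied out to | |||
|  * userland uninitialized.  A defensive variant, sketched here as an | |||
|  * assumption rather than the committed fix, zeroes the struct first: | |||
|  * | |||
|  *	if (uap->oattr != NULL) { | |||
|  *		bzero(&oattr32, sizeof(oattr32)); | |||
|  *		mq_attr_to32(&oattr, &oattr32); | |||
|  *		error = copyout(&oattr32, uap->oattr, sizeof(oattr32)); | |||
|  *	} | |||
|  */ | |||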
2770 | ||||
2771 | int | |||
2772 | freebsd32_kmq_timedsend(struct thread *td, | |||
2773 | struct freebsd32_kmq_timedsend_args *uap) | |||
2774 | { | |||
2775 | struct mqueue *mq; | |||
2776 | struct file *fp; | |||
2777 | struct timespec32 ets32; | |||
2778 | struct timespec *abs_timeout, ets; | |||
2779 | int error; | |||
2780 | int waitok; | |||
2781 | ||||
2782 | error = getmq_write(td, uap->mqd, &fp, NULL, &mq); | |||
2783 | if (error) | |||
2784 | return (error); | |||
2785 | if (uap->abs_timeout != NULL) { | |||
2786 | error = copyin(uap->abs_timeout, &ets32, sizeof(ets32)); | |||
2787 | if (error != 0) | |||
2788 | return (error); | |||
2789 | CP(ets32, ets, tv_sec); | |||
2790 | CP(ets32, ets, tv_nsec); | |||
2791 | abs_timeout = &ets; | |||
2792 | } else | |||
2793 | abs_timeout = NULL; | |||
2794 | waitok = !(fp->f_flag & O_NONBLOCK); | |||
2795 | error = mqueue_send(mq, uap->msg_ptr, uap->msg_len, | |||
2796 | uap->msg_prio, waitok, abs_timeout); | |||
2797 | fdrop(fp, td); | |||
2798 | return (error); | |||
2799 | } | |||
2800 | ||||
2801 | int | |||
2802 | freebsd32_kmq_timedreceive(struct thread *td, | |||
2803 | struct freebsd32_kmq_timedreceive_args *uap) | |||
2804 | { | |||
2805 | struct mqueue *mq; | |||
2806 | struct file *fp; | |||
2807 | struct timespec32 ets32; | |||
2808 | struct timespec *abs_timeout, ets; | |||
2809 | int error, waitok; | |||
2810 | ||||
2811 | error = getmq_read(td, uap->mqd, &fp, NULL, &mq); | |||
2812 | if (error) | |||
2813 | return (error); | |||
2814 | if (uap->abs_timeout != NULL) { | |||
2815 | error = copyin(uap->abs_timeout, &ets32, sizeof(ets32)); | |||
2816 | if (error != 0) | |||
2817 | return (error); | |||
2818 | CP(ets32, ets, tv_sec); | |||
2819 | CP(ets32, ets, tv_nsec); | |||
2820 | abs_timeout = &ets; | |||
2821 | } else | |||
2822 | abs_timeout = NULL; | |||
2823 | waitok = !(fp->f_flag & O_NONBLOCK); | |||
2824 | error = mqueue_receive(mq, uap->msg_ptr, uap->msg_len, | |||
2825 | uap->msg_prio, waitok, abs_timeout); | |||
2826 | fdrop(fp, td); | |||
2827 | return (error); | |||
2828 | } | |||
2829 | ||||
2830 | int | |||
2831 | freebsd32_kmq_notify(struct thread *td, struct freebsd32_kmq_notify_args *uap) | |||
2832 | { | |||
2833 | struct sigevent ev, *evp; | |||
2834 | struct sigevent32 ev32; | |||
2835 | int error; | |||
2836 | ||||
2837 | if (uap->sigev == NULL) { | |||
2838 | evp = NULL; | |||
2839 | } else { | |||
2840 | error = copyin(uap->sigev, &ev32, sizeof(ev32)); | |||
2841 | if (error != 0) | |||
2842 | return (error); | |||
2843 | error = convert_sigevent32(&ev32, &ev); | |||
2844 | if (error != 0) | |||
2845 | return (error); | |||
2846 | evp = &ev; | |||
2847 | } | |||
2848 | return (kern_kmq_notify(td, uap->mqd, evp)); | |||
2849 | } | |||
2850 | ||||
2851 | static struct syscall_helper_data mq32_syscalls[] = { | |||
2852 | SYSCALL32_INIT_HELPER(freebsd32_kmq_open), | |||
2853 | SYSCALL32_INIT_HELPER(freebsd32_kmq_setattr), | |||
2854 | SYSCALL32_INIT_HELPER(freebsd32_kmq_timedsend), | |||
2855 | SYSCALL32_INIT_HELPER(freebsd32_kmq_timedreceive), | |||
2856 | SYSCALL32_INIT_HELPER(freebsd32_kmq_notify), | |||
2857 | SYSCALL32_INIT_HELPER_COMPAT(kmq_unlink), | |||
2858 | SYSCALL_INIT_LAST | |||
2859 | }; | |||
2860 | #endif | |||
2861 | ||||
2862 | static int | |||
2863 | mqinit(void) | |||
2864 | { | |||
2865 | int error; | |||
2866 | ||||
2867 | error = syscall_helper_register(mq_syscalls, SY_THR_STATIC_KLD); | |||
2868 | if (error != 0) | |||
2869 | return (error); | |||
2870 | #ifdef COMPAT_FREEBSD32 | |||
2871 | error = syscall32_helper_register(mq32_syscalls, SY_THR_STATIC_KLD); | |||
2872 | if (error != 0) | |||
2873 | return (error); | |||
2874 | #endif | |||
2875 | return (0); | |||
2876 | } | |||
2877 | ||||
2878 | static int | |||
2879 | mqunload(void) | |||
2880 | { | |||
2881 | ||||
2882 | #ifdef COMPAT_FREEBSD32 | |||
2883 | syscall32_helper_unregister(mq32_syscalls); | |||
2884 | #endif | |||
2885 | syscall_helper_unregister(mq_syscalls); | |||
2886 | return (0); | |||
2887 | } | |||
2888 | ||||
2889 | static int | |||
2890 | mq_modload(struct module *module, int cmd, void *arg) | |||
2891 | { | |||
2892 | int error = 0; | |||
2893 | ||||
2894 | error = vfs_modevent(module, cmd, arg); | |||
2895 | if (error != 0) | |||
2896 | return (error); | |||
2897 | ||||
2898 | switch (cmd) { | |||
2899 | case MOD_LOAD: | |||
2900 | error = mqinit(); | |||
2901 | if (error != 0) | |||
2902 | mqunload(); | |||
2903 | break; | |||
2904 | case MOD_UNLOAD: | |||
2905 | error = mqunload(); | |||
2906 | break; | |||
2907 | default: | |||
2908 | break; | |||
2909 | } | |||
2910 | return (error); | |||
2911 | } | |||
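| /* | |||
|  * Illustrative userland steps once the MOD_LOAD case above has | |||
|  * registered the filesystem and the kmq_* syscalls; the mount point | |||
|  * path is an arbitrary example: | |||
|  * | |||
|  *	kldload mqueuefs | |||
|  *	mount -t mqueuefs null /mnt/mqueue | |||
|  */ | |||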
2912 | ||||
2913 | static moduledata_t mqueuefs_mod = { | |||
2914 | "mqueuefs", | |||
2915 | mq_modload, | |||
2916 | &mqueuefs_vfsconf | |||
2917 | }; | |||
2918 | DECLARE_MODULE(mqueuefs, mqueuefs_mod, SI_SUB_VFS, SI_ORDER_MIDDLE); | |||
2919 | MODULE_VERSION(mqueuefs, 1); | |||