1 /* $NetBSD: smbfs_kq.c,v 1.7 2003/06/29 22:31:12 fvdl Exp $ */
2
3 /*-
4 * Copyright (c) 2003 The NetBSD Foundation, Inc.
5 * All rights reserved.
6 *
7 * This code is derived from software contributed to The NetBSD Foundation
8 * by Jaromir Dolecek.
9 *
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions
12 * are met:
13 * 1. Redistributions of source code must retain the above copyright
14 * notice, this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright
16 * notice, this list of conditions and the following disclaimer in the
17 * documentation and/or other materials provided with the distribution.
18 * 3. All advertising materials mentioning features or use of this software
19 * must display the following acknowledgement:
20 * This product includes software developed by the NetBSD
21 * Foundation, Inc. and its contributors.
22 * 4. Neither the name of The NetBSD Foundation nor the names of its
23 * contributors may be used to endorse or promote products derived
24 * from this software without specific prior written permission.
25 *
26 * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
27 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
28 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
29 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
30 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
31 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
32 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
33 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
34 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
35 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
36 * POSSIBILITY OF SUCH DAMAGE.
37 */
38
39 #include <sys/cdefs.h>
40 __KERNEL_RCSID(0, "$NetBSD: smbfs_kq.c,v 1.7 2003/06/29 22:31:12 fvdl Exp $");
41
42 #include <sys/param.h>
43 #include <sys/systm.h>
44 #include <sys/namei.h>
45 #include <sys/kernel.h>
46 #include <sys/proc.h>
47 #include <sys/buf.h>
48 #include <sys/fcntl.h>
49 #include <sys/mount.h>
50 #include <sys/unistd.h>
51 #include <sys/vnode.h>
52 #include <sys/lockf.h>
53 #include <sys/malloc.h>
54 #include <sys/kthread.h>
55 #include <sys/file.h>
56 #include <sys/dirent.h>
57
58 #include <machine/limits.h>
59
60 #include <uvm/uvm.h>
61 #include <uvm/uvm_extern.h>
62
63 #include <netsmb/smb.h>
64 #include <netsmb/smb_conn.h>
65 #include <netsmb/smb_subr.h>
66 #include <netsmb/smb_rq.h>
67
68 #include <fs/smbfs/smbfs.h>
69 #include <fs/smbfs/smbfs_node.h>
70 #include <fs/smbfs/smbfs_subr.h>
71
72 #include <miscfs/genfs/genfs.h>
73
74 /*
75 * The maximum of outstanding SMB requests is 65536, since the
76 * message id is 16bit. Don't consume all. If there is more
77 * than 30k directory notify requests, fall back to polling mode.
78 */
79 #define DNOTIFY_MAX 30000
80
81 struct kevq {
82 SLIST_ENTRY(kevq) kev_link; /* link on kevlist */
83 SLIST_ENTRY(kevq) k_link; /* link on poll/dn list */
84
85 struct vnode *vp;
86 u_int usecount;
87 u_int flags;
88 #define KEVQ_BUSY 0x01 /* currently being processed */
89 #define KEVQ_WANT 0x02 /* want to change this entry */
90 #define KEVQ_DNOT 0x04 /* kevent using NT directory change notify */
91 struct timespec omtime; /* old modification time */
92 struct timespec octime; /* old change time */
93 nlink_t onlink; /* old number of references to file */
94 struct smb_rq *rq; /* request structure */
95 };
96
97 static struct proc *smbkqp; /* the kevent handler */
98 static struct smb_cred smbkq_scred;
99
100 static struct simplelock smbkq_lock = SIMPLELOCK_INITIALIZER;
101 /* guard access to k*evlist */
102 static SLIST_HEAD(, kevq) kevlist = SLIST_HEAD_INITIALIZER(kevlist);
103 static SLIST_HEAD(, kevq) kplist = SLIST_HEAD_INITIALIZER(kplist);
104 static SLIST_HEAD(, kevq) kdnlist = SLIST_HEAD_INITIALIZER(kdnlist);
105
106 static int dnot_num = 0; /* number of active dir notifications */
107 static u_int32_t kevs;
108
109 static void smbfskq_dirnotify(void *);
110
111 /*
112 * This routine periodically checks server for change
113 * of any of the watched files every SMBFS_MINATTRTIME/2 seconds.
114 * Only changes in size, modification time, change time and nlinks
115 * are being checked, everything else is ignored.
116 * Directory events are watched via NT DIRECTORY CHANGE NOTIFY
117 * if the server supports it.
118 *
119 * The routine only calls VOP_GETATTR() when it's likely it would get
120 * some new data, i.e. when the vnode expires from attrcache. This
121 * should give same result as periodically running stat(2) from userland,
122 * while keeping CPU/network usage low, and still provide proper kevent
123 * semantics.
124 * The poller thread is created when first vnode is added to watch list,
125 * and exits when the watch list is empty. The overhead of thread creation
126 * isn't really important, neither speed of attach and detach of knote.
127 */
128 /* ARGSUSED */
129 static void
130 smbfs_kqpoll(void *arg)
131 {
132 struct kevq *ke;
133 struct vattr attr;
134 int error=0;
135 struct proc *p = smbkqp;
136 u_quad_t osize;
137 int needwake;
138
139 simple_lock(&smbkq_lock);
140 for(;;) {
141 /* check all entries on poll list for changes */
142 SLIST_FOREACH(ke, &kplist, k_link) {
143 /* skip if still in attrcache */
144 if (smbfs_attr_cachelookup(ke->vp, &attr) != ENOENT)
145 continue;
146
147 /*
148 * Mark entry busy, release lock and check
149 * for changes.
150 */
151 ke->flags |= KEVQ_BUSY;
152 simple_unlock(&smbkq_lock);
153
154 /* save v_size, smbfs_getattr() updates it */
155 osize = ke->vp->v_size;
156
157 error = VOP_GETATTR(ke->vp, &attr, p->p_ucred, p);
158 if (error) {
159 /* relock and proceed with next */
160 simple_lock(&smbkq_lock);
161 continue;
162 }
163
164 /* following is a bit fragile, but about best
165 * we can get */
166 if (ke->vp->v_type != VDIR && attr.va_size != osize) {
167 int extended = (attr.va_size > osize);
168 VN_KNOTE(ke->vp, NOTE_WRITE
169 | (extended ? NOTE_EXTEND : 0));
170 ke->omtime = attr.va_mtime;
171 } else if (attr.va_mtime.tv_sec != ke->omtime.tv_sec
172 || attr.va_mtime.tv_nsec != ke->omtime.tv_nsec) {
173 VN_KNOTE(ke->vp, NOTE_WRITE);
174 ke->omtime = attr.va_mtime;
175 }
176
177 if (attr.va_ctime.tv_sec != ke->octime.tv_sec
178 || attr.va_ctime.tv_nsec != ke->octime.tv_nsec) {
179 VN_KNOTE(ke->vp, NOTE_ATTRIB);
180 ke->octime = attr.va_ctime;
181 }
182
183 if (attr.va_nlink != ke->onlink) {
184 VN_KNOTE(ke->vp, NOTE_LINK);
185 ke->onlink = attr.va_nlink;
186 }
187
188 simple_lock(&smbkq_lock);
189 ke->flags &= ~KEVQ_BUSY;
190 if (ke->flags & KEVQ_WANT) {
191 ke->flags &= ~KEVQ_WANT;
192 wakeup(ke);
193 }
194 }
195
196 /* Exit if there are no more kevents to watch for */
197 if (kevs == 0) {
198 smbkqp = NULL;
199 break;
200 }
201
202 /* only wake periodically if poll list is nonempty */
203 needwake = !SLIST_EMPTY(&kplist);
204
205 /* wait a while before checking for changes again */
206 if (SLIST_EMPTY(&kdnlist)) {
207 error = ltsleep(smbkqp, PSOCK, "smbkqidl",
208 needwake ? (SMBFS_ATTRTIMO * hz / 2) : 0,
209 &smbkq_lock);
210 }
211
212 if (!error) {
213 /* woken up, check if any pending notifications */
214 while (!SLIST_EMPTY(&kdnlist)) {
215 int s, hint;
216
217 s = splnet();
218 ke = SLIST_FIRST(&kdnlist);
219 SLIST_REMOVE_HEAD(&kdnlist, k_link);
220 SLIST_NEXT(ke, k_link) = NULL;
221 splx(s);
222
223 /* drop lock while processing */
224 simple_unlock(&smbkq_lock);
225
226 /*
227 * Skip fetch if not yet setup.
228 */
229 if (__predict_false(ke->rq == NULL))
230 goto notifyrq;
231
232 error = smbfs_smb_nt_dirnotify_fetch(ke->rq,
233 &hint);
234 ke->rq = NULL; /* rq deallocated by now */
235 if (error) {
236 /*
237 * if there is error, switch to
238 * polling for this one
239 */
240 ke->flags &= KEVQ_DNOT;
241 SLIST_INSERT_HEAD(&kplist, ke, k_link);
242 continue;
243 }
244
245 VN_KNOTE(ke->vp, hint);
246
247 notifyrq:
248 /* reissue the notify request */
249 (void) smbfs_smb_nt_dirnotify_setup(
250 VTOSMB(ke->vp),
251 &ke->rq, &smbkq_scred,
252 smbfskq_dirnotify, ke);
253
254 /* reacquire the lock */
255 simple_lock(&smbkq_lock);
256 }
257 }
258 }
259 simple_unlock(&smbkq_lock);
260
261 kthread_exit(0);
262 }
263
264 static void
265 smbfskq_dirnotify(void *arg)
266 {
267 struct kevq *ke = arg;
268
269 if (SLIST_NEXT(ke, k_link)) {
270 /* already on notify list */
271 return;
272 }
273
274 SLIST_INSERT_HEAD(&kdnlist, ke, k_link);
275 wakeup(smbkqp);
276 }
277
/*
 * Detach a knote from its smbfs vnode.
 *
 * Unhooks the knote from the vnode's klist, then drops this knote's
 * reference on the shared kevq entry.  The last reference garbage-collects
 * the entry from the watch lists; for a directory-notify entry, the
 * outstanding SMB request (if any) is cancelled and freed after the
 * lock is released.
 */
static void
filt_smbfsdetach(struct knote *kn)
{
	struct kevq *ke = (struct kevq *)kn->kn_hook;
	struct smb_rq *rq = NULL;

	/* XXXLUKEM lock the struct? */
	SLIST_REMOVE(&ke->vp->v_klist, kn, knote, kn_selnext);

	/* Remove the vnode from watch list */
	simple_lock(&smbkq_lock);

	/* the handler does something to it, wait */
	/*
	 * The poller clears KEVQ_BUSY and wakes us while holding
	 * smbkq_lock; ltsleep() atomically releases/reacquires it.
	 */
	while (ke->flags & KEVQ_BUSY) {
		ke->flags |= KEVQ_WANT;
		ltsleep(ke, PSOCK, "smbkqdw", 0, &smbkq_lock);
	}

	if (ke->usecount > 1) {
		/* keep, other kevents need this */
		ke->usecount--;
	} else {
		/* last user, g/c */
		if (ke->flags & KEVQ_DNOT) {
			dnot_num--;
			/* take ownership of the pending request, free below */
			rq = ke->rq;

			/* If on dirnotify list, remove */
			/*
			 * NOTE(review): kdnlist is manipulated at splnet()
			 * elsewhere; this removal is not spl-protected —
			 * verify this cannot race the notify callback.
			 */
			if (SLIST_NEXT(ke, k_link))
				SLIST_REMOVE(&kdnlist, ke, kevq, k_link);
		} else
			SLIST_REMOVE(&kplist, ke, kevq, k_link);
		SLIST_REMOVE(&kevlist, ke, kevq, kev_link);
		FREE(ke, M_KEVENT);
	}
	kevs--;

	simple_unlock(&smbkq_lock);

	/* If there was request still pending, cancel it now */
	if (rq) {
		smb_iod_removerq(rq);

		/*
		 * Explicitly cancel the request, so that server can
		 * free directory change notify resources.
		 */
		smbfs_smb_ntcancel(SSTOCP(rq->sr_share), rq->sr_mid,
		    &smbkq_scred);

		/* Free */
		smb_rq_done(rq);
	}
}
332
333 static int
334 filt_smbfsread(struct knote *kn, long hint)
335 {
336 struct kevq *ke = (struct kevq *)kn->kn_hook;
337 struct vnode *vp = ke->vp;
338
339 /*
340 * filesystem is gone, so set the EOF flag and schedule
341 * the knote for deletion.
342 */
343 if (hint == NOTE_REVOKE) {
344 kn->kn_flags |= (EV_EOF | EV_ONESHOT);
345 return (1);
346 }
347
348 /* There is no size info for directories */
349 if (vp->v_type == VDIR) {
350 /*
351 * This is kind of hackish, since we need to
352 * set the flag when we are called with the hint
353 * to make confirming call from kern_event.c
354 * succeed too, but need to unset it afterwards
355 * so that the directory wouldn't stay flagged
356 * as changed.
357 * XXX perhaps just fail for directories?
358 */
359 if (hint & NOTE_WRITE) {
360 kn->kn_fflags |= NOTE_WRITE;
361 return (1 * sizeof(struct dirent));
362 } else if (hint == 0 && (kn->kn_fflags & NOTE_WRITE)) {
363 kn->kn_fflags &= ~NOTE_WRITE;
364 return (1 * sizeof(struct dirent));
365 } else
366 return (0);
367 }
368
369 /* XXXLUKEM lock the struct? */
370 kn->kn_data = vp->v_size - kn->kn_fp->f_offset;
371 return (kn->kn_data != 0);
372 }
373
374 static int
375 filt_smbfsvnode(struct knote *kn, long hint)
376 {
377
378 if (kn->kn_sfflags & hint)
379 kn->kn_fflags |= hint;
380 if (hint == NOTE_REVOKE) {
381 kn->kn_flags |= EV_EOF;
382 return (1);
383 }
384 return (kn->kn_fflags != 0);
385 }
386
/* Filter ops tables: { f_isfd = 1 (fd-attached), no attach, detach, event } */
static const struct filterops smbfsread_filtops =
	{ 1, NULL, filt_smbfsdetach, filt_smbfsread };
static const struct filterops smbfsvnode_filtops =
	{ 1, NULL, filt_smbfsdetach, filt_smbfsvnode };
391
392 int
393 smbfs_kqfilter(void *v)
394 {
395 struct vop_kqfilter_args /* {
396 struct vnode *a_vp;
397 struct knote *a_kn;
398 } */ *ap = v;
399 struct vnode *vp = ap->a_vp;
400 struct knote *kn = ap->a_kn;
401 struct kevq *ke, *ken;
402 int error = 0;
403 struct vattr attr;
404 struct proc *p = curproc; /* XXX */
405 int dnot;
406 struct smb_vc *vcp = SSTOVC(VTOSMB(vp)->n_mount->sm_share);
407
408 switch (kn->kn_filter) {
409 case EVFILT_READ:
410 kn->kn_fop = &smbfsread_filtops;
411 break;
412 case EVFILT_VNODE:
413 kn->kn_fop = &smbfsvnode_filtops;
414 break;
415 default:
416 return (1);
417 }
418
419 /* Find out if we can use directory change notify for this file */
420 dnot = (vp->v_type == VDIR
421 && (SMB_CAPS(vcp) & SMB_CAP_NT_SMBS)
422 && dnot_num < DNOTIFY_MAX);
423
424 /*
425 * Put the vnode to watched list.
426 */
427 kevs++;
428
429 /*
430 * Fetch current attributes. It's only needed when the vnode
431 * is not watched yet, but we need to do this without lock
432 * held. This is likely cheap due to attrcache, so do it now.
433 */
434 memset(&attr, 0, sizeof(attr));
435 (void) VOP_GETATTR(vp, &attr, p->p_ucred, p);
436
437 /* ensure the handler is running */
438 if (!smbkqp) {
439 error = kthread_create1(smbfs_kqpoll, NULL, &smbkqp,
440 "smbkq");
441 smb_makescred(&smbkq_scred, smbkqp, smbkqp->p_ucred);
442 if (error) {
443 kevs--;
444 return (error);
445 }
446 }
447
448 /*
449 * Allocate new kev. It's more probable it will be needed,
450 * and the malloc is cheaper than scanning possibly
451 * large kevlist list second time after malloc.
452 */
453 MALLOC(ken, struct kevq *, sizeof(struct kevq), M_KEVENT, M_WAITOK);
454
455 /* Check the list and insert new entry */
456 simple_lock(&smbkq_lock);
457 SLIST_FOREACH(ke, &kevlist, kev_link) {
458 if (ke->vp == vp)
459 break;
460 }
461
462 if (ke) {
463 /* already watched, so just bump usecount */
464 ke->usecount++;
465 FREE(ken, M_KEVENT); /* dispose, don't need */
466 } else {
467 /* need a new one */
468 memset(ken, 0, sizeof(*ken));
469 ke = ken;
470 ke->vp = vp;
471 ke->usecount = 1;
472 ke->flags = (dnot) ? KEVQ_DNOT : 0;
473 ke->omtime = attr.va_mtime;
474 ke->octime = attr.va_ctime;
475 ke->onlink = attr.va_nlink;
476
477 if (dnot) {
478 int s;
479
480 /*
481 * Add kevent to list of 'need attend' kevnets.
482 * The handler will pick it up and setup request
483 * appropriately.
484 */
485 s = splnet();
486 SLIST_INSERT_HEAD(&kdnlist, ke, k_link);
487 splx(s);
488 dnot_num++;
489 } else {
490 /* add to poll list */
491 SLIST_INSERT_HEAD(&kplist, ke, k_link);
492 }
493
494 SLIST_INSERT_HEAD(&kevlist, ke, kev_link);
495
496 /* kick the handler */
497 wakeup(smbkqp);
498 }
499
500 /* XXXLUKEM lock the struct? */
501 SLIST_INSERT_HEAD(&vp->v_klist, kn, kn_selnext);
502 kn->kn_hook = ke;
503
504 simple_unlock(&smbkq_lock);
505
506 return (0);
507 }
Cache object: cd0fc55c5dd096d99457391ea438ae3c
|